2024-12-02 15:19:25,782 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@790da477 2024-12-02 15:19:25,797 main DEBUG Took 0.012131 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging 2024-12-02 15:19:25,798 main DEBUG PluginManager 'Core' found 129 plugins 2024-12-02 15:19:25,798 main DEBUG PluginManager 'Level' found 0 plugins 2024-12-02 15:19:25,801 main DEBUG PluginManager 'Lookup' found 16 plugins 2024-12-02 15:19:25,804 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-02 15:19:25,816 main DEBUG PluginManager 'TypeConverter' found 26 plugins 2024-12-02 15:19:25,855 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-02 15:19:25,857 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-02 15:19:25,859 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-02 15:19:25,860 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-02 15:19:25,864 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-02 15:19:25,865 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-02 15:19:25,868 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-02 15:19:25,869 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-02 15:19:25,870 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-02 15:19:25,870 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-02 15:19:25,872 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-02 15:19:25,873 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-02 15:19:25,875 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-02 15:19:25,876 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 
2024-12-02 15:19:25,877 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-02 15:19:25,879 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-02 15:19:25,880 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-02 15:19:25,880 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-02 15:19:25,881 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-02 15:19:25,883 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-02 15:19:25,884 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-02 15:19:25,886 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-02 15:19:25,889 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-02 15:19:25,890 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-02 15:19:25,892 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-02 15:19:25,893 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger]. 2024-12-02 15:19:25,898 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-02 15:19:25,901 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin]. 2024-12-02 15:19:25,904 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root}) 2024-12-02 15:19:25,912 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout]. 
2024-12-02 15:19:25,915 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null") 2024-12-02 15:19:25,915 main DEBUG PluginManager 'Converter' found 47 plugins 2024-12-02 15:19:25,927 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender]. 2024-12-02 15:19:25,934 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={}) 2024-12-02 15:19:25,944 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR 2024-12-02 15:19:25,944 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin]. 2024-12-02 15:19:25,945 main DEBUG createAppenders(={Console}) 2024-12-02 15:19:25,948 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@790da477 initialized 2024-12-02 15:19:25,949 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@790da477 2024-12-02 15:19:25,950 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@790da477 OK. 2024-12-02 15:19:25,952 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1 2024-12-02 15:19:25,952 main DEBUG OutputStream closed 2024-12-02 15:19:25,954 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true 2024-12-02 15:19:25,955 main DEBUG Appender DefaultConsole-1 stopped with status true 2024-12-02 15:19:25,955 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@5a56cdac OK 2024-12-02 15:19:26,147 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6 2024-12-02 15:19:26,150 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger 2024-12-02 15:19:26,152 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector 2024-12-02 15:19:26,153 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name= 2024-12-02 15:19:26,154 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory 2024-12-02 15:19:26,155 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter 2024-12-02 15:19:26,155 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper 2024-12-02 15:19:26,155 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j 2024-12-02 15:19:26,156 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl 2024-12-02 15:19:26,156 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans 2024-12-02 15:19:26,157 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase 2024-12-02 15:19:26,157 main DEBUG Registering 
MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop 2024-12-02 15:19:26,157 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers 2024-12-02 15:19:26,158 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices 2024-12-02 15:19:26,158 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig 2024-12-02 15:19:26,158 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel 2024-12-02 15:19:26,159 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore 2024-12-02 15:19:26,160 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console 2024-12-02 15:19:26,170 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-12-02 15:19:26,171 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-logging/target/hbase-logging-3.0.0-beta-2-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@6f63b475) with optional ClassLoader: null 2024-12-02 15:19:26,171 main DEBUG Shutdown hook enabled. Registering a new one. 2024-12-02 15:19:26,172 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@6f63b475] started OK. 2024-12-02T15:19:26,195 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.snapshot.TestExportSnapshot timeout: 13 mins 2024-12-02 15:19:26,199 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED) 2024-12-02 15:19:26,199 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-12-02T15:19:26,655 DEBUG [main {}] hbase.HBaseTestingUtil(323): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/44e720fa-d04f-d823-6564-55fab94a4e60 2024-12-02T15:19:26,656 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.snapshot.TestSecureExportSnapshot timeout: 13 mins 2024-12-02T15:19:26,715 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... 
using builtin-java classes where applicable 2024-12-02T15:19:27,068 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=3, rsPorts=, rsClass=null, numDataNodes=3, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-02T15:19:27,099 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/44e720fa-d04f-d823-6564-55fab94a4e60/cluster_91cb4ebe-02bf-547b-91b0-e80db118e0cc, deleteOnExit=true 2024-12-02T15:19:27,101 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-12-02T15:19:27,102 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/44e720fa-d04f-d823-6564-55fab94a4e60/test.cache.data in system properties and HBase conf 2024-12-02T15:19:27,103 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/44e720fa-d04f-d823-6564-55fab94a4e60/hadoop.tmp.dir in system properties and HBase conf 2024-12-02T15:19:27,104 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/44e720fa-d04f-d823-6564-55fab94a4e60/hadoop.log.dir in system properties and HBase conf 2024-12-02T15:19:27,104 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/44e720fa-d04f-d823-6564-55fab94a4e60/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-02T15:19:27,105 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/44e720fa-d04f-d823-6564-55fab94a4e60/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-02T15:19:27,106 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-12-02T15:19:27,218 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-12-02T15:19:27,223 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/44e720fa-d04f-d823-6564-55fab94a4e60/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-02T15:19:27,225 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/44e720fa-d04f-d823-6564-55fab94a4e60/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-02T15:19:27,226 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/44e720fa-d04f-d823-6564-55fab94a4e60/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-02T15:19:27,226 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/44e720fa-d04f-d823-6564-55fab94a4e60/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-02T15:19:27,227 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/44e720fa-d04f-d823-6564-55fab94a4e60/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-02T15:19:27,229 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/44e720fa-d04f-d823-6564-55fab94a4e60/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-02T15:19:27,233 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/44e720fa-d04f-d823-6564-55fab94a4e60/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-02T15:19:27,234 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/44e720fa-d04f-d823-6564-55fab94a4e60/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-02T15:19:27,234 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/44e720fa-d04f-d823-6564-55fab94a4e60/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-02T15:19:27,235 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/44e720fa-d04f-d823-6564-55fab94a4e60/nfs.dump.dir in system properties and HBase conf 2024-12-02T15:19:27,235 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/44e720fa-d04f-d823-6564-55fab94a4e60/java.io.tmpdir in system properties and HBase conf 2024-12-02T15:19:27,236 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/44e720fa-d04f-d823-6564-55fab94a4e60/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-02T15:19:27,237 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/44e720fa-d04f-d823-6564-55fab94a4e60/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-02T15:19:27,238 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/44e720fa-d04f-d823-6564-55fab94a4e60/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-02T15:19:28,591 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-12-02T15:19:28,672 INFO [Time-limited test {}] log.Log(170): Logging initialized @3892ms to org.eclipse.jetty.util.log.Slf4jLog 2024-12-02T15:19:28,767 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-02T15:19:28,835 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-02T15:19:28,881 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-02T15:19:28,881 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-02T15:19:28,884 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-02T15:19:28,906 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-02T15:19:28,911 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@c3e3f70{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/44e720fa-d04f-d823-6564-55fab94a4e60/hadoop.log.dir/,AVAILABLE} 2024-12-02T15:19:28,913 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@15dced0e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-02T15:19:29,220 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@287bbdb7{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/44e720fa-d04f-d823-6564-55fab94a4e60/java.io.tmpdir/jetty-localhost-45293-hadoop-hdfs-3_4_1-tests_jar-_-any-3342769293572371572/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-02T15:19:29,231 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@33b00b71{HTTP/1.1, (http/1.1)}{localhost:45293} 2024-12-02T15:19:29,232 INFO [Time-limited test {}] server.Server(415): Started @4453ms 2024-12-02T15:19:29,806 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-02T15:19:29,826 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-02T15:19:29,830 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-02T15:19:29,830 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-02T15:19:29,830 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-02T15:19:29,834 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2c897929{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/44e720fa-d04f-d823-6564-55fab94a4e60/hadoop.log.dir/,AVAILABLE} 2024-12-02T15:19:29,835 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1bf4a906{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-02T15:19:29,942 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3ae93e7a{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/44e720fa-d04f-d823-6564-55fab94a4e60/java.io.tmpdir/jetty-localhost-42033-hadoop-hdfs-3_4_1-tests_jar-_-any-6399944323519162382/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-02T15:19:29,942 INFO 
[Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5703ed84{HTTP/1.1, (http/1.1)}{localhost:42033} 2024-12-02T15:19:29,943 INFO [Time-limited test {}] server.Server(415): Started @5164ms 2024-12-02T15:19:29,998 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-02T15:19:30,166 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-02T15:19:30,179 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-02T15:19:30,204 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-02T15:19:30,204 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-02T15:19:30,204 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-02T15:19:30,215 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@621090f4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/44e720fa-d04f-d823-6564-55fab94a4e60/hadoop.log.dir/,AVAILABLE} 2024-12-02T15:19:30,217 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6c3f2c62{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-02T15:19:30,352 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@76c02ba1{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/44e720fa-d04f-d823-6564-55fab94a4e60/java.io.tmpdir/jetty-localhost-46597-hadoop-hdfs-3_4_1-tests_jar-_-any-5955878687899819538/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-02T15:19:30,353 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@7e7a95e6{HTTP/1.1, (http/1.1)}{localhost:46597} 2024-12-02T15:19:30,353 INFO [Time-limited test {}] server.Server(415): Started @5574ms 2024-12-02T15:19:30,356 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-02T15:19:30,454 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-02T15:19:30,477 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-02T15:19:30,494 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-02T15:19:30,495 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-02T15:19:30,495 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-02T15:19:30,496 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@26bad6ee{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/44e720fa-d04f-d823-6564-55fab94a4e60/hadoop.log.dir/,AVAILABLE} 2024-12-02T15:19:30,497 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6632e866{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-02T15:19:30,607 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@77f27ab{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/44e720fa-d04f-d823-6564-55fab94a4e60/java.io.tmpdir/jetty-localhost-35927-hadoop-hdfs-3_4_1-tests_jar-_-any-889970195338541934/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-02T15:19:30,608 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@295c990e{HTTP/1.1, (http/1.1)}{localhost:35927} 2024-12-02T15:19:30,609 INFO [Time-limited test {}] server.Server(415): Started @5830ms 2024-12-02T15:19:30,611 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-12-02T15:19:31,410 WARN [Thread-121 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/44e720fa-d04f-d823-6564-55fab94a4e60/cluster_91cb4ebe-02bf-547b-91b0-e80db118e0cc/data/data1/current/BP-1550669498-172.17.0.2-1733152767991/current, will proceed with Du for space computation calculation, 2024-12-02T15:19:31,410 WARN [Thread-125 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/44e720fa-d04f-d823-6564-55fab94a4e60/cluster_91cb4ebe-02bf-547b-91b0-e80db118e0cc/data/data4/current/BP-1550669498-172.17.0.2-1733152767991/current, will proceed with Du for space computation calculation, 2024-12-02T15:19:31,410 WARN [Thread-122 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/44e720fa-d04f-d823-6564-55fab94a4e60/cluster_91cb4ebe-02bf-547b-91b0-e80db118e0cc/data/data2/current/BP-1550669498-172.17.0.2-1733152767991/current, will proceed with Du for space computation calculation, 2024-12-02T15:19:31,410 WARN [Thread-124 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/44e720fa-d04f-d823-6564-55fab94a4e60/cluster_91cb4ebe-02bf-547b-91b0-e80db118e0cc/data/data3/current/BP-1550669498-172.17.0.2-1733152767991/current, will proceed with Du for space computation calculation, 2024-12-02T15:19:31,463 WARN [Thread-81 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-02T15:19:31,463 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-02T15:19:31,511 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xebcf2578edc6bf6a with lease ID 0x6a3ba70eb250b54a: Processing first storage report for DS-495b90e5-2933-43da-86b7-15d032db6564 from datanode DatanodeRegistration(127.0.0.1:41939, datanodeUuid=59152d84-b8ba-43fe-83c0-049166250efe, infoPort=36149, infoSecurePort=0, ipcPort=43321, storageInfo=lv=-57;cid=testClusterID;nsid=546394189;c=1733152767991) 2024-12-02T15:19:31,512 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xebcf2578edc6bf6a with lease ID 0x6a3ba70eb250b54a: from storage DS-495b90e5-2933-43da-86b7-15d032db6564 node DatanodeRegistration(127.0.0.1:41939, datanodeUuid=59152d84-b8ba-43fe-83c0-049166250efe, infoPort=36149, infoSecurePort=0, ipcPort=43321, storageInfo=lv=-57;cid=testClusterID;nsid=546394189;c=1733152767991), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-02T15:19:31,512 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xb4951ddf174a4f0a with lease ID 0x6a3ba70eb250b549: Processing first storage report for DS-1e7feb52-ac3c-4679-b23f-d54b551b677f from datanode DatanodeRegistration(127.0.0.1:32893, datanodeUuid=da21253d-2aeb-4a86-a040-28953b734c51, infoPort=38521, infoSecurePort=0, ipcPort=33749, storageInfo=lv=-57;cid=testClusterID;nsid=546394189;c=1733152767991) 2024-12-02T15:19:31,513 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xb4951ddf174a4f0a with lease ID 0x6a3ba70eb250b549: from storage DS-1e7feb52-ac3c-4679-b23f-d54b551b677f node DatanodeRegistration(127.0.0.1:32893, datanodeUuid=da21253d-2aeb-4a86-a040-28953b734c51, infoPort=38521, infoSecurePort=0, ipcPort=33749, storageInfo=lv=-57;cid=testClusterID;nsid=546394189;c=1733152767991), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-02T15:19:31,513 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xebcf2578edc6bf6a with lease ID 0x6a3ba70eb250b54a: Processing first storage report for DS-eeb95f72-2a14-49b2-aaa2-30ab63273c41 from datanode DatanodeRegistration(127.0.0.1:41939, datanodeUuid=59152d84-b8ba-43fe-83c0-049166250efe, infoPort=36149, infoSecurePort=0, ipcPort=43321, storageInfo=lv=-57;cid=testClusterID;nsid=546394189;c=1733152767991) 2024-12-02T15:19:31,513 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xebcf2578edc6bf6a with lease ID 0x6a3ba70eb250b54a: from storage DS-eeb95f72-2a14-49b2-aaa2-30ab63273c41 node DatanodeRegistration(127.0.0.1:41939, datanodeUuid=59152d84-b8ba-43fe-83c0-049166250efe, infoPort=36149, infoSecurePort=0, ipcPort=43321, storageInfo=lv=-57;cid=testClusterID;nsid=546394189;c=1733152767991), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-02T15:19:31,513 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xb4951ddf174a4f0a with lease ID 0x6a3ba70eb250b549: Processing first storage report for DS-60fe235c-5988-40eb-ae2d-9efa1c94ff44 from datanode DatanodeRegistration(127.0.0.1:32893, datanodeUuid=da21253d-2aeb-4a86-a040-28953b734c51, infoPort=38521, infoSecurePort=0, ipcPort=33749, storageInfo=lv=-57;cid=testClusterID;nsid=546394189;c=1733152767991) 2024-12-02T15:19:31,513 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 
0xb4951ddf174a4f0a with lease ID 0x6a3ba70eb250b549: from storage DS-60fe235c-5988-40eb-ae2d-9efa1c94ff44 node DatanodeRegistration(127.0.0.1:32893, datanodeUuid=da21253d-2aeb-4a86-a040-28953b734c51, infoPort=38521, infoSecurePort=0, ipcPort=33749, storageInfo=lv=-57;cid=testClusterID;nsid=546394189;c=1733152767991), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-02T15:19:31,662 WARN [Thread-142 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/44e720fa-d04f-d823-6564-55fab94a4e60/cluster_91cb4ebe-02bf-547b-91b0-e80db118e0cc/data/data5/current/BP-1550669498-172.17.0.2-1733152767991/current, will proceed with Du for space computation calculation, 2024-12-02T15:19:31,664 WARN [Thread-143 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/44e720fa-d04f-d823-6564-55fab94a4e60/cluster_91cb4ebe-02bf-547b-91b0-e80db118e0cc/data/data6/current/BP-1550669498-172.17.0.2-1733152767991/current, will proceed with Du for space computation calculation, 2024-12-02T15:19:31,700 WARN [Thread-103 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-02T15:19:31,707 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x5b99797e62d796b2 with lease ID 0x6a3ba70eb250b54b: Processing first storage report for DS-e14bf018-ab4b-4ae5-81ba-6ab8b232d871 from datanode DatanodeRegistration(127.0.0.1:45623, datanodeUuid=fa65bf0c-5e9d-4428-808c-f8f114691ef2, infoPort=34583, infoSecurePort=0, ipcPort=36271, storageInfo=lv=-57;cid=testClusterID;nsid=546394189;c=1733152767991) 2024-12-02T15:19:31,707 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x5b99797e62d796b2 with lease ID 0x6a3ba70eb250b54b: from storage DS-e14bf018-ab4b-4ae5-81ba-6ab8b232d871 node DatanodeRegistration(127.0.0.1:45623, datanodeUuid=fa65bf0c-5e9d-4428-808c-f8f114691ef2, infoPort=34583, infoSecurePort=0, ipcPort=36271, storageInfo=lv=-57;cid=testClusterID;nsid=546394189;c=1733152767991), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-02T15:19:31,707 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x5b99797e62d796b2 with lease ID 0x6a3ba70eb250b54b: Processing first storage report for DS-cc504ac4-c384-41d2-a525-f652a11bdc45 from datanode DatanodeRegistration(127.0.0.1:45623, datanodeUuid=fa65bf0c-5e9d-4428-808c-f8f114691ef2, infoPort=34583, infoSecurePort=0, ipcPort=36271, storageInfo=lv=-57;cid=testClusterID;nsid=546394189;c=1733152767991) 2024-12-02T15:19:31,708 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x5b99797e62d796b2 with lease ID 0x6a3ba70eb250b54b: from storage DS-cc504ac4-c384-41d2-a525-f652a11bdc45 node DatanodeRegistration(127.0.0.1:45623, datanodeUuid=fa65bf0c-5e9d-4428-808c-f8f114691ef2, infoPort=34583, infoSecurePort=0, ipcPort=36271, storageInfo=lv=-57;cid=testClusterID;nsid=546394189;c=1733152767991), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-02T15:19:31,778 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/44e720fa-d04f-d823-6564-55fab94a4e60 2024-12-02T15:19:31,857 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/44e720fa-d04f-d823-6564-55fab94a4e60/cluster_91cb4ebe-02bf-547b-91b0-e80db118e0cc/zookeeper_0, clientPort=55609, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/44e720fa-d04f-d823-6564-55fab94a4e60/cluster_91cb4ebe-02bf-547b-91b0-e80db118e0cc/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/44e720fa-d04f-d823-6564-55fab94a4e60/cluster_91cb4ebe-02bf-547b-91b0-e80db118e0cc/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-02T15:19:31,866 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=55609 2024-12-02T15:19:31,878 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T15:19:31,881 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T15:19:32,159 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073741825_1001 (size=7) 2024-12-02T15:19:32,161 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073741825_1001 (size=7) 2024-12-02T15:19:32,164 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073741825_1001 (size=7) 2024-12-02T15:19:32,569 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657 with version=8 2024-12-02T15:19:32,569 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1139): Setting hbase.fs.tmp.dir to hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/hbase-staging 2024-12-02T15:19:32,660 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16 2024-12-02T15:19:32,897 INFO [Time-limited test {}] client.ConnectionUtils(128): master/be0458f36726:0 server-side Connection retries=45 2024-12-02T15:19:32,907 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-02T15:19:32,908 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-02T15:19:32,913 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-02T15:19:32,913 INFO [Time-limited test {}] ipc.RpcExecutor(188): 
Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-02T15:19:32,914 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-02T15:19:33,039 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-12-02T15:19:33,097 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl 2024-12-02T15:19:33,107 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout 2024-12-02T15:19:33,111 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-02T15:19:33,140 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 106902 (auto-detected) 2024-12-02T15:19:33,142 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:02 (auto-detected) 2024-12-02T15:19:33,161 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:34441 2024-12-02T15:19:33,188 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:34441 connecting to ZooKeeper ensemble=127.0.0.1:55609 2024-12-02T15:19:33,285 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:344410x0, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-02T15:19:33,288 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:34441-0x10197ce54a90000 connected 2024-12-02T15:19:33,556 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T15:19:33,563 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T15:19:33,578 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:34441-0x10197ce54a90000, quorum=127.0.0.1:55609, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-02T15:19:33,583 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657, hbase.cluster.distributed=false 2024-12-02T15:19:33,618 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:34441-0x10197ce54a90000, quorum=127.0.0.1:55609, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-02T15:19:33,622 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=34441 2024-12-02T15:19:33,623 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=34441 2024-12-02T15:19:33,624 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, 
numCallQueues=1, port=34441 2024-12-02T15:19:33,624 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=34441 2024-12-02T15:19:33,625 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=34441 2024-12-02T15:19:33,708 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/be0458f36726:0 server-side Connection retries=45 2024-12-02T15:19:33,709 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-02T15:19:33,710 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-02T15:19:33,710 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-02T15:19:33,710 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-02T15:19:33,710 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-02T15:19:33,713 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-02T15:19:33,714 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-02T15:19:33,715 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:43025 2024-12-02T15:19:33,717 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:43025 connecting to ZooKeeper ensemble=127.0.0.1:55609 2024-12-02T15:19:33,718 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T15:19:33,721 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T15:19:33,737 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:430250x0, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-02T15:19:33,738 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:430250x0, quorum=127.0.0.1:55609, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-02T15:19:33,738 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:43025-0x10197ce54a90001 connected 2024-12-02T15:19:33,742 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-02T15:19:33,752 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, 
evictRemainRatio=0.5 2024-12-02T15:19:33,755 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:43025-0x10197ce54a90001, quorum=127.0.0.1:55609, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-02T15:19:33,761 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:43025-0x10197ce54a90001, quorum=127.0.0.1:55609, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-02T15:19:33,762 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=43025 2024-12-02T15:19:33,762 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=43025 2024-12-02T15:19:33,762 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=43025 2024-12-02T15:19:33,771 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=43025 2024-12-02T15:19:33,771 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=43025 2024-12-02T15:19:33,788 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/be0458f36726:0 server-side Connection retries=45 2024-12-02T15:19:33,788 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-02T15:19:33,789 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-02T15:19:33,789 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-02T15:19:33,790 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-02T15:19:33,790 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-02T15:19:33,790 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-02T15:19:33,791 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-02T15:19:33,792 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:45405 2024-12-02T15:19:33,794 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:45405 connecting to ZooKeeper ensemble=127.0.0.1:55609 2024-12-02T15:19:33,795 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T15:19:33,797 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do 
block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T15:19:33,812 DEBUG [pool-69-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:454050x0, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-02T15:19:33,813 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:454050x0, quorum=127.0.0.1:55609, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-02T15:19:33,813 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:45405-0x10197ce54a90002 connected 2024-12-02T15:19:33,814 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-02T15:19:33,815 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-02T15:19:33,817 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45405-0x10197ce54a90002, quorum=127.0.0.1:55609, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-02T15:19:33,819 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45405-0x10197ce54a90002, quorum=127.0.0.1:55609, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-02T15:19:33,822 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=45405 2024-12-02T15:19:33,823 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=45405 2024-12-02T15:19:33,823 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=45405 2024-12-02T15:19:33,826 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=45405 2024-12-02T15:19:33,827 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=45405 2024-12-02T15:19:33,848 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/be0458f36726:0 server-side Connection retries=45 2024-12-02T15:19:33,849 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-02T15:19:33,849 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-02T15:19:33,849 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-02T15:19:33,849 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-02T15:19:33,849 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-02T15:19:33,849 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting 
hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-02T15:19:33,850 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-02T15:19:33,851 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:35155 2024-12-02T15:19:33,853 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:35155 connecting to ZooKeeper ensemble=127.0.0.1:55609 2024-12-02T15:19:33,854 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T15:19:33,856 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T15:19:33,870 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:351550x0, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-02T15:19:33,872 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:35155-0x10197ce54a90003, quorum=127.0.0.1:55609, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-02T15:19:33,872 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:35155-0x10197ce54a90003 connected 2024-12-02T15:19:33,873 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-02T15:19:33,879 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-02T15:19:33,880 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:35155-0x10197ce54a90003, quorum=127.0.0.1:55609, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-02T15:19:33,882 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:35155-0x10197ce54a90003, quorum=127.0.0.1:55609, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-02T15:19:33,883 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=35155 2024-12-02T15:19:33,884 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=35155 2024-12-02T15:19:33,884 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=35155 2024-12-02T15:19:33,888 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=35155 2024-12-02T15:19:33,889 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=35155 2024-12-02T15:19:33,915 DEBUG [M:0;be0458f36726:34441 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;be0458f36726:34441 2024-12-02T15:19:33,919 INFO [master/be0458f36726:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/be0458f36726,34441,1733152772730 2024-12-02T15:19:33,937 DEBUG [Time-limited test-EventThread {}] 
zookeeper.ZKWatcher(609): regionserver:43025-0x10197ce54a90001, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-02T15:19:33,938 DEBUG [pool-69-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45405-0x10197ce54a90002, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-02T15:19:33,938 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34441-0x10197ce54a90000, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-02T15:19:33,938 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35155-0x10197ce54a90003, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-02T15:19:33,940 DEBUG [master/be0458f36726:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:34441-0x10197ce54a90000, quorum=127.0.0.1:55609, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/be0458f36726,34441,1733152772730 2024-12-02T15:19:33,978 DEBUG [pool-69-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45405-0x10197ce54a90002, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-02T15:19:33,978 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35155-0x10197ce54a90003, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-02T15:19:33,979 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43025-0x10197ce54a90001, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-02T15:19:33,979 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35155-0x10197ce54a90003, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T15:19:33,979 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43025-0x10197ce54a90001, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T15:19:33,979 DEBUG [pool-69-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45405-0x10197ce54a90002, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T15:19:33,979 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34441-0x10197ce54a90000, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T15:19:33,981 DEBUG [master/be0458f36726:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:34441-0x10197ce54a90000, quorum=127.0.0.1:55609, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-02T15:19:33,982 INFO [master/be0458f36726:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/be0458f36726,34441,1733152772730 from backup master directory 2024-12-02T15:19:33,987 DEBUG [Time-limited test-EventThread {}] 
zookeeper.ZKWatcher(609): regionserver:43025-0x10197ce54a90001, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-02T15:19:33,987 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34441-0x10197ce54a90000, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/be0458f36726,34441,1733152772730 2024-12-02T15:19:33,987 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34441-0x10197ce54a90000, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-02T15:19:33,987 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35155-0x10197ce54a90003, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-02T15:19:33,988 DEBUG [pool-69-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45405-0x10197ce54a90002, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-02T15:19:33,989 WARN [master/be0458f36726:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-02T15:19:33,989 INFO [master/be0458f36726:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=be0458f36726,34441,1733152772730 2024-12-02T15:19:33,991 INFO [master/be0458f36726:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-12-02T15:19:33,993 INFO [master/be0458f36726:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-12-02T15:19:34,050 DEBUG [master/be0458f36726:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/hbase.id] with ID: 8e50677f-f1c7-447a-8806-ffbc6b162076 2024-12-02T15:19:34,050 DEBUG [master/be0458f36726:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/.tmp/hbase.id 2024-12-02T15:19:34,070 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073741826_1002 (size=42) 2024-12-02T15:19:34,071 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073741826_1002 (size=42) 2024-12-02T15:19:34,072 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073741826_1002 (size=42) 2024-12-02T15:19:34,073 DEBUG [master/be0458f36726:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/.tmp/hbase.id]:[hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/hbase.id] 2024-12-02T15:19:34,139 INFO [master/be0458f36726:0:becomeActiveMaster {}] fs.HFileSystem(339): Added 
intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T15:19:34,144 INFO [master/be0458f36726:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-12-02T15:19:34,162 INFO [master/be0458f36726:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 16ms. 2024-12-02T15:19:34,170 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43025-0x10197ce54a90001, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T15:19:34,170 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34441-0x10197ce54a90000, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T15:19:34,170 DEBUG [pool-69-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45405-0x10197ce54a90002, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T15:19:34,172 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35155-0x10197ce54a90003, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T15:19:34,202 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073741827_1003 (size=196) 2024-12-02T15:19:34,202 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073741827_1003 (size=196) 2024-12-02T15:19:34,203 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073741827_1003 (size=196) 2024-12-02T15:19:34,220 INFO [master/be0458f36726:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-02T15:19:34,223 INFO 
[master/be0458f36726:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-02T15:19:34,238 DEBUG [master/be0458f36726:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(244): No decryptEncryptedDataEncryptionKey method in DFSClient, should be hadoop version with HDFS-12396 java.lang.NoSuchMethodException: org.apache.hadoop.hdfs.DFSClient.decryptEncryptedDataEncryptionKey(org.apache.hadoop.fs.FileEncryptionInfo) at java.lang.Class.getDeclaredMethod(Class.java:2675) ~[?:?] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelperWithoutHDFS12396(FanOutOneBlockAsyncDFSOutputSaslHelper.java:183) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelper(FanOutOneBlockAsyncDFSOutputSaslHelper.java:242) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.<clinit>(FanOutOneBlockAsyncDFSOutputSaslHelper.java:253) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at java.lang.Class.forName0(Native Method) ~[?:?] at java.lang.Class.forName(Class.java:375) ~[?:?] at org.apache.hadoop.hbase.wal.AsyncFSWALProvider.load(AsyncFSWALProvider.java:150) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.getProviderClass(WALFactory.java:174) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.<init>(WALFactory.java:262) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.<init>(WALFactory.java:231) ~[classes/:?] at org.apache.hadoop.hbase.master.region.MasterRegion.create(MasterRegion.java:400) ~[classes/:?] at org.apache.hadoop.hbase.master.region.MasterRegionFactory.create(MasterRegionFactory.java:135) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.finishActiveMasterInitialization(HMaster.java:1003) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.startActiveMasterManager(HMaster.java:2535) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.lambda$run$0(HMaster.java:613) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.lambda$tracedRunnable$2(TraceUtil.java:155) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-02T15:19:34,242 INFO [master/be0458f36726:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-02T15:19:34,281 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073741828_1004 (size=1189) 2024-12-02T15:19:34,281 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073741828_1004 (size=1189) 2024-12-02T15:19:34,281 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073741828_1004 (size=1189) 2024-12-02T15:19:34,301 INFO [master/be0458f36726:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/MasterData/data/master/store 2024-12-02T15:19:34,326 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073741829_1005 (size=34) 2024-12-02T15:19:34,326 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073741829_1005 (size=34) 2024-12-02T15:19:34,326 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073741829_1005 (size=34) 2024-12-02T15:19:34,331 INFO [master/be0458f36726:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 
2024-12-02T15:19:34,334 DEBUG [master/be0458f36726:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T15:19:34,336 DEBUG [master/be0458f36726:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-02T15:19:34,336 INFO [master/be0458f36726:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-02T15:19:34,336 DEBUG [master/be0458f36726:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-02T15:19:34,338 DEBUG [master/be0458f36726:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-02T15:19:34,338 DEBUG [master/be0458f36726:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-02T15:19:34,338 INFO [master/be0458f36726:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-02T15:19:34,340 DEBUG [master/be0458f36726:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733152774335Disabling compacts and flushes for region at 1733152774335Disabling writes for close at 1733152774338 (+3 ms)Writing region close event to WAL at 1733152774338Closed at 1733152774338 2024-12-02T15:19:34,342 WARN [master/be0458f36726:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/MasterData/data/master/store/.initializing 2024-12-02T15:19:34,342 DEBUG [master/be0458f36726:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/MasterData/WALs/be0458f36726,34441,1733152772730 2024-12-02T15:19:34,351 INFO [master/be0458f36726:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-02T15:19:34,367 INFO [master/be0458f36726:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=be0458f36726%2C34441%2C1733152772730, suffix=, logDir=hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/MasterData/WALs/be0458f36726,34441,1733152772730, archiveDir=hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/MasterData/oldWALs, maxLogs=10 2024-12-02T15:19:34,395 DEBUG [master/be0458f36726:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/MasterData/WALs/be0458f36726,34441,1733152772730/be0458f36726%2C34441%2C1733152772730.1733152774373, exclude list is [], retry=0 2024-12-02T15:19:34,416 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:41939,DS-495b90e5-2933-43da-86b7-15d032db6564,DISK] 
2024-12-02T15:19:34,416 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:45623,DS-e14bf018-ab4b-4ae5-81ba-6ab8b232d871,DISK] 2024-12-02T15:19:34,416 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:32893,DS-1e7feb52-ac3c-4679-b23f-d54b551b677f,DISK] 2024-12-02T15:19:34,419 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.ProtobufDecoder(117): Hadoop 3.3 and above shades protobuf. 2024-12-02T15:19:34,462 INFO [master/be0458f36726:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/MasterData/WALs/be0458f36726,34441,1733152772730/be0458f36726%2C34441%2C1733152772730.1733152774373 2024-12-02T15:19:34,463 DEBUG [master/be0458f36726:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:36149:36149),(127.0.0.1/127.0.0.1:34583:34583),(127.0.0.1/127.0.0.1:38521:38521)] 2024-12-02T15:19:34,464 DEBUG [master/be0458f36726:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-02T15:19:34,464 DEBUG [master/be0458f36726:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T15:19:34,468 DEBUG [master/be0458f36726:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-02T15:19:34,470 DEBUG [master/be0458f36726:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-02T15:19:34,514 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-02T15:19:34,539 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-02T15:19:34,543 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T15:19:34,546 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-02T15:19:34,547 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-02T15:19:34,551 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-02T15:19:34,552 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T15:19:34,553 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T15:19:34,554 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-02T15:19:34,559 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-02T15:19:34,559 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T15:19:34,560 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 
{}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T15:19:34,561 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-02T15:19:34,565 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-02T15:19:34,565 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T15:19:34,567 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T15:19:34,567 DEBUG [master/be0458f36726:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-02T15:19:34,572 DEBUG [master/be0458f36726:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-02T15:19:34,574 DEBUG [master/be0458f36726:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-02T15:19:34,584 DEBUG [master/be0458f36726:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-02T15:19:34,585 DEBUG [master/be0458f36726:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-02T15:19:34,590 DEBUG [master/be0458f36726:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 
2024-12-02T15:19:34,594 DEBUG [master/be0458f36726:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-02T15:19:34,599 DEBUG [master/be0458f36726:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-02T15:19:34,601 INFO [master/be0458f36726:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=64667164, jitterRate=-0.03638416528701782}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-02T15:19:34,606 DEBUG [master/be0458f36726:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733152774487Initializing all the Stores at 1733152774489 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733152774490 (+1 ms)Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733152774491 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733152774492 (+1 ms)Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733152774492Cleaning up temporary data from old regions at 1733152774585 (+93 ms)Region opened successfully at 1733152774606 (+21 ms) 2024-12-02T15:19:34,607 INFO [master/be0458f36726:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-02T15:19:34,647 DEBUG [master/be0458f36726:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@179e20bb, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=be0458f36726/172.17.0.2:0 2024-12-02T15:19:34,681 INFO [master/be0458f36726:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 
2024-12-02T15:19:34,693 INFO [master/be0458f36726:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-02T15:19:34,693 INFO [master/be0458f36726:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-02T15:19:34,697 INFO [master/be0458f36726:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-02T15:19:34,699 INFO [master/be0458f36726:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 1 msec 2024-12-02T15:19:34,705 INFO [master/be0458f36726:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 5 msec 2024-12-02T15:19:34,705 INFO [master/be0458f36726:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-02T15:19:34,733 INFO [master/be0458f36726:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-02T15:19:34,744 DEBUG [master/be0458f36726:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34441-0x10197ce54a90000, quorum=127.0.0.1:55609, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-02T15:19:34,753 DEBUG [master/be0458f36726:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-02T15:19:34,757 INFO [master/be0458f36726:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-02T15:19:34,759 DEBUG [master/be0458f36726:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34441-0x10197ce54a90000, quorum=127.0.0.1:55609, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-02T15:19:34,761 DEBUG [master/be0458f36726:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-02T15:19:34,765 INFO [master/be0458f36726:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-02T15:19:34,770 DEBUG [master/be0458f36726:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34441-0x10197ce54a90000, quorum=127.0.0.1:55609, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-02T15:19:34,778 DEBUG [master/be0458f36726:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-02T15:19:34,780 DEBUG [master/be0458f36726:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34441-0x10197ce54a90000, quorum=127.0.0.1:55609, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-02T15:19:34,786 DEBUG [master/be0458f36726:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-02T15:19:34,805 DEBUG [master/be0458f36726:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34441-0x10197ce54a90000, quorum=127.0.0.1:55609, 
baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-02T15:19:34,812 DEBUG [master/be0458f36726:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-02T15:19:34,820 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43025-0x10197ce54a90001, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-02T15:19:34,820 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35155-0x10197ce54a90003, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-02T15:19:34,820 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34441-0x10197ce54a90000, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-02T15:19:34,820 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35155-0x10197ce54a90003, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T15:19:34,820 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34441-0x10197ce54a90000, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T15:19:34,820 DEBUG [pool-69-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45405-0x10197ce54a90002, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-02T15:19:34,820 DEBUG [pool-69-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45405-0x10197ce54a90002, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T15:19:34,820 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43025-0x10197ce54a90001, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T15:19:34,824 INFO [master/be0458f36726:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=be0458f36726,34441,1733152772730, sessionid=0x10197ce54a90000, setting cluster-up flag (Was=false) 2024-12-02T15:19:34,845 DEBUG [pool-69-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45405-0x10197ce54a90002, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T15:19:34,845 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43025-0x10197ce54a90001, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T15:19:34,845 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34441-0x10197ce54a90000, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T15:19:34,845 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35155-0x10197ce54a90003, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 
2024-12-02T15:19:34,870 DEBUG [master/be0458f36726:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-02T15:19:34,872 DEBUG [master/be0458f36726:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=be0458f36726,34441,1733152772730 2024-12-02T15:19:34,887 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35155-0x10197ce54a90003, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T15:19:34,887 DEBUG [pool-69-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45405-0x10197ce54a90002, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T15:19:34,887 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34441-0x10197ce54a90000, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T15:19:34,887 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43025-0x10197ce54a90001, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T15:19:34,912 DEBUG [master/be0458f36726:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-02T15:19:34,913 DEBUG [master/be0458f36726:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=be0458f36726,34441,1733152772730 2024-12-02T15:19:34,922 INFO [master/be0458f36726:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-02T15:19:34,969 DEBUG [master/be0458f36726:0:becomeActiveMaster {}] master.HMaster(3441): Registered master coprocessor service: service=AccessControlService 2024-12-02T15:19:34,974 INFO [master/be0458f36726:0:becomeActiveMaster {}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-02T15:19:34,975 INFO [master/be0458f36726:0:becomeActiveMaster {}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.SecureTestUtil$MasterSyncObserver loaded, priority=536870912. 
2024-12-02T15:19:34,995 INFO [RS:2;be0458f36726:35155 {}] regionserver.HRegionServer(746): ClusterId : 8e50677f-f1c7-447a-8806-ffbc6b162076 2024-12-02T15:19:34,995 INFO [RS:1;be0458f36726:45405 {}] regionserver.HRegionServer(746): ClusterId : 8e50677f-f1c7-447a-8806-ffbc6b162076 2024-12-02T15:19:34,995 INFO [RS:0;be0458f36726:43025 {}] regionserver.HRegionServer(746): ClusterId : 8e50677f-f1c7-447a-8806-ffbc6b162076 2024-12-02T15:19:34,998 DEBUG [RS:0;be0458f36726:43025 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-02T15:19:34,998 DEBUG [RS:2;be0458f36726:35155 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-02T15:19:35,001 DEBUG [RS:1;be0458f36726:45405 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-02T15:19:35,015 DEBUG [RS:2;be0458f36726:35155 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-02T15:19:35,015 DEBUG [RS:1;be0458f36726:45405 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-02T15:19:35,015 DEBUG [RS:2;be0458f36726:35155 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-02T15:19:35,015 DEBUG [RS:1;be0458f36726:45405 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-02T15:19:35,015 DEBUG [RS:0;be0458f36726:43025 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-02T15:19:35,015 DEBUG [RS:0;be0458f36726:43025 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-02T15:19:35,030 DEBUG [RS:2;be0458f36726:35155 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-02T15:19:35,030 DEBUG [RS:1;be0458f36726:45405 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-02T15:19:35,031 DEBUG [RS:1;be0458f36726:45405 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@73021319, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=be0458f36726/172.17.0.2:0 2024-12-02T15:19:35,031 DEBUG [RS:2;be0458f36726:35155 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@52833eaf, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=be0458f36726/172.17.0.2:0 2024-12-02T15:19:35,037 DEBUG [master/be0458f36726:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-02T15:19:35,039 DEBUG [RS:0;be0458f36726:43025 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-02T15:19:35,040 DEBUG [RS:0;be0458f36726:43025 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@30d2d983, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=be0458f36726/172.17.0.2:0 2024-12-02T15:19:35,049 INFO [master/be0458f36726:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): 
slop=0.2 2024-12-02T15:19:35,051 DEBUG [RS:1;be0458f36726:45405 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;be0458f36726:45405 2024-12-02T15:19:35,055 DEBUG [RS:2;be0458f36726:35155 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:2;be0458f36726:35155 2024-12-02T15:19:35,056 INFO [RS:1;be0458f36726:45405 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-02T15:19:35,056 INFO [RS:2;be0458f36726:35155 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-02T15:19:35,056 INFO [RS:1;be0458f36726:45405 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-02T15:19:35,056 INFO [RS:2;be0458f36726:35155 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-02T15:19:35,056 DEBUG [RS:1;be0458f36726:45405 {}] regionserver.HRegionServer(660): Registered regionserver coprocessor executorService: executorService=AccessControlService 2024-12-02T15:19:35,056 DEBUG [RS:2;be0458f36726:35155 {}] regionserver.HRegionServer(660): Registered regionserver coprocessor executorService: executorService=AccessControlService 2024-12-02T15:19:35,057 INFO [RS:1;be0458f36726:45405 {}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-02T15:19:35,057 INFO [RS:2;be0458f36726:35155 {}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-02T15:19:35,057 DEBUG [RS:1;be0458f36726:45405 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-02T15:19:35,057 DEBUG [RS:2;be0458f36726:35155 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-02T15:19:35,058 INFO [master/be0458f36726:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-12-02T15:19:35,058 DEBUG [RS:0;be0458f36726:43025 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;be0458f36726:43025 2024-12-02T15:19:35,058 INFO [RS:0;be0458f36726:43025 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-02T15:19:35,058 INFO [RS:0;be0458f36726:43025 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-02T15:19:35,058 DEBUG [RS:0;be0458f36726:43025 {}] regionserver.HRegionServer(660): Registered regionserver coprocessor executorService: executorService=AccessControlService 2024-12-02T15:19:35,059 INFO [RS:0;be0458f36726:43025 {}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-02T15:19:35,059 DEBUG [RS:0;be0458f36726:43025 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-12-02T15:19:35,061 INFO [RS:1;be0458f36726:45405 {}] regionserver.HRegionServer(2659): reportForDuty to master=be0458f36726,34441,1733152772730 with port=45405, startcode=1733152773787 2024-12-02T15:19:35,062 INFO [RS:2;be0458f36726:35155 {}] regionserver.HRegionServer(2659): reportForDuty to master=be0458f36726,34441,1733152772730 with port=35155, startcode=1733152773848 2024-12-02T15:19:35,063 INFO [RS:0;be0458f36726:43025 {}] regionserver.HRegionServer(2659): reportForDuty to master=be0458f36726,34441,1733152772730 with port=43025, startcode=1733152773677 2024-12-02T15:19:35,066 DEBUG [master/be0458f36726:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: be0458f36726,34441,1733152772730 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-02T15:19:35,076 DEBUG [master/be0458f36726:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/be0458f36726:0, corePoolSize=5, maxPoolSize=5 2024-12-02T15:19:35,076 DEBUG [master/be0458f36726:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/be0458f36726:0, corePoolSize=5, maxPoolSize=5 2024-12-02T15:19:35,076 DEBUG [master/be0458f36726:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/be0458f36726:0, corePoolSize=5, maxPoolSize=5 2024-12-02T15:19:35,076 DEBUG [master/be0458f36726:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/be0458f36726:0, corePoolSize=5, maxPoolSize=5 2024-12-02T15:19:35,076 DEBUG [master/be0458f36726:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/be0458f36726:0, corePoolSize=10, maxPoolSize=10 2024-12-02T15:19:35,077 DEBUG [master/be0458f36726:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/be0458f36726:0, corePoolSize=1, maxPoolSize=1 2024-12-02T15:19:35,077 DEBUG [RS:2;be0458f36726:35155 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-02T15:19:35,077 DEBUG [RS:1;be0458f36726:45405 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-02T15:19:35,077 DEBUG [master/be0458f36726:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/be0458f36726:0, corePoolSize=2, maxPoolSize=2 2024-12-02T15:19:35,077 DEBUG [RS:0;be0458f36726:43025 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-02T15:19:35,077 DEBUG [master/be0458f36726:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/be0458f36726:0, corePoolSize=1, maxPoolSize=1 2024-12-02T15:19:35,099 INFO [master/be0458f36726:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733152805098 2024-12-02T15:19:35,101 INFO [master/be0458f36726:0:becomeActiveMaster {}] 
cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-02T15:19:35,102 INFO [master/be0458f36726:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-02T15:19:35,106 INFO [master/be0458f36726:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-02T15:19:35,107 INFO [master/be0458f36726:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-02T15:19:35,107 INFO [master/be0458f36726:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-02T15:19:35,107 INFO [master/be0458f36726:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-02T15:19:35,108 INFO [master/be0458f36726:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-02T15:19:35,114 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-02T15:19:35,114 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-02T15:19:35,120 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T15:19:35,121 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-02T15:19:35,127 INFO [master/be0458f36726:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-02T15:19:35,129 INFO [master/be0458f36726:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize 
cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-02T15:19:35,129 INFO [master/be0458f36726:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-02T15:19:35,133 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49255, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-12-02T15:19:35,133 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44125, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=RegionServerStatusService 2024-12-02T15:19:35,133 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46181, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService 2024-12-02T15:19:35,139 INFO [master/be0458f36726:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-02T15:19:35,142 INFO [master/be0458f36726:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-02T15:19:35,142 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34441 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:667) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:3.0.0-beta-2-SNAPSHOT] 2024-12-02T15:19:35,148 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34441 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:667) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:3.0.0-beta-2-SNAPSHOT] 2024-12-02T15:19:35,149 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34441 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:667) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:3.0.0-beta-2-SNAPSHOT] 2024-12-02T15:19:35,161 DEBUG [master/be0458f36726:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/be0458f36726:0:becomeActiveMaster-HFileCleaner.large.0-1733152775144,5,FailOnTimeoutGroup] 2024-12-02T15:19:35,162 DEBUG [master/be0458f36726:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/be0458f36726:0:becomeActiveMaster-HFileCleaner.small.0-1733152775161,5,FailOnTimeoutGroup] 2024-12-02T15:19:35,164 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073741831_1007 (size=1321) 2024-12-02T15:19:35,164 INFO [master/be0458f36726:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-02T15:19:35,164 INFO [master/be0458f36726:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-02T15:19:35,166 INFO [master/be0458f36726:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-02T15:19:35,166 INFO [master/be0458f36726:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
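[editor's note] The FSTableDescriptors entry earlier in this log shows the hbase:meta table descriptor with its column families (info, ns, rep_barrier, table) carrying VERSIONS, BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1' and an 8 KB block size. As a minimal sketch only, assuming the standard HBase 2.x/3.x client API (hbase:meta itself is created by the master, not by client code; the table name below is hypothetical), a comparable column family would be declared like this:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class MetaLikeDescriptorSketch {
  public static TableDescriptor build() {
    // Mirrors the 'info' family attributes shown in the descriptor logged above:
    // VERSIONS=3, BLOOMFILTER=ROWCOL, IN_MEMORY=true, ROW_INDEX_V1 encoding, 8 KB blocks.
    ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("info"))
        .setMaxVersions(3)
        .setBloomFilterType(BloomType.ROWCOL)
        .setInMemory(true)
        .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
        .setBlocksize(8 * 1024)
        .build();
    // Hypothetical table name, for illustration only.
    return TableDescriptorBuilder.newBuilder(TableName.valueOf("demo", "meta_like"))
        .setColumnFamily(info)
        .build();
  }
}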
2024-12-02T15:19:35,167 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073741831_1007 (size=1321) 2024-12-02T15:19:35,170 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073741831_1007 (size=1321) 2024-12-02T15:19:35,172 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-02T15:19:35,173 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657 2024-12-02T15:19:35,178 DEBUG [RS:2;be0458f36726:35155 {}] regionserver.HRegionServer(2683): Master is not running yet 2024-12-02T15:19:35,178 DEBUG [RS:1;be0458f36726:45405 {}] regionserver.HRegionServer(2683): Master is not running yet 2024-12-02T15:19:35,178 DEBUG [RS:0;be0458f36726:43025 {}] regionserver.HRegionServer(2683): Master is not running yet 2024-12-02T15:19:35,178 WARN [RS:2;be0458f36726:35155 {}] regionserver.HRegionServer(841): reportForDuty failed; sleeping 100 ms and then retrying. 2024-12-02T15:19:35,178 WARN [RS:0;be0458f36726:43025 {}] regionserver.HRegionServer(841): reportForDuty failed; sleeping 100 ms and then retrying. 2024-12-02T15:19:35,178 WARN [RS:1;be0458f36726:45405 {}] regionserver.HRegionServer(841): reportForDuty failed; sleeping 100 ms and then retrying. 
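[editor's note] The WARN entries just above show each region server failing reportForDuty with ServerNotRunningYetException, sleeping 100 ms, and retrying until the master's RPC services come up. A minimal sketch of that retry-until-ready pattern in plain Java follows; it is an illustration of the pattern, not HBase's actual reportForDuty implementation:

import java.util.concurrent.TimeUnit;

public final class RetryUntilReady {
  interface Attempt { boolean tryOnce() throws Exception; }

  // Attempt an operation, pause briefly on failure, and retry until success or deadline.
  static boolean retry(Attempt attempt, long sleepMs, long deadlineMs) throws InterruptedException {
    long deadline = System.currentTimeMillis() + deadlineMs;
    while (System.currentTimeMillis() < deadline) {
      try {
        if (attempt.tryOnce()) {
          return true;                      // e.g. the master accepted the startup report
        }
      } catch (Exception e) {
        // e.g. ServerNotRunningYetException: fall through and retry
      }
      TimeUnit.MILLISECONDS.sleep(sleepMs); // the log shows a 100 ms pause between attempts
    }
    return false;
  }
}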
2024-12-02T15:19:35,192 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073741832_1008 (size=32) 2024-12-02T15:19:35,193 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073741832_1008 (size=32) 2024-12-02T15:19:35,193 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073741832_1008 (size=32) 2024-12-02T15:19:35,196 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T15:19:35,199 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-02T15:19:35,203 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-02T15:19:35,203 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T15:19:35,204 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-02T15:19:35,205 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-02T15:19:35,208 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-02T15:19:35,208 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T15:19:35,209 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-02T15:19:35,210 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-02T15:19:35,212 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-02T15:19:35,213 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T15:19:35,214 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-02T15:19:35,214 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-02T15:19:35,217 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-02T15:19:35,218 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T15:19:35,219 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-02T15:19:35,219 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 
1588230740 2024-12-02T15:19:35,221 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/hbase/meta/1588230740 2024-12-02T15:19:35,223 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/hbase/meta/1588230740 2024-12-02T15:19:35,227 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-02T15:19:35,227 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-02T15:19:35,230 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-02T15:19:35,234 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-02T15:19:35,239 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-02T15:19:35,241 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=74826117, jitterRate=0.1149960309267044}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-02T15:19:35,243 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733152775196Initializing all the Stores at 1733152775199 (+3 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733152775199Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733152775199Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733152775199Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733152775199Cleaning up temporary data from old regions at 1733152775227 (+28 ms)Region opened successfully at 1733152775243 (+16 ms) 2024-12-02T15:19:35,243 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 
1588230740, disabling compactions & flushes 2024-12-02T15:19:35,244 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-02T15:19:35,244 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-02T15:19:35,244 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-02T15:19:35,244 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-02T15:19:35,245 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-02T15:19:35,246 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733152775243Disabling compacts and flushes for region at 1733152775243Disabling writes for close at 1733152775244 (+1 ms)Writing region close event to WAL at 1733152775245 (+1 ms)Closed at 1733152775245 2024-12-02T15:19:35,250 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-02T15:19:35,251 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-02T15:19:35,259 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-02T15:19:35,267 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-02T15:19:35,272 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-02T15:19:35,280 INFO [RS:2;be0458f36726:35155 {}] regionserver.HRegionServer(2659): reportForDuty to master=be0458f36726,34441,1733152772730 with port=35155, startcode=1733152773848 2024-12-02T15:19:35,280 INFO [RS:0;be0458f36726:43025 {}] regionserver.HRegionServer(2659): reportForDuty to master=be0458f36726,34441,1733152772730 with port=43025, startcode=1733152773677 2024-12-02T15:19:35,280 INFO [RS:1;be0458f36726:45405 {}] regionserver.HRegionServer(2659): reportForDuty to master=be0458f36726,34441,1733152772730 with port=45405, startcode=1733152773787 2024-12-02T15:19:35,283 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34441 {}] master.ServerManager(363): Checking decommissioned status of RegionServer be0458f36726,43025,1733152773677 2024-12-02T15:19:35,285 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34441 {}] master.ServerManager(517): Registering regionserver=be0458f36726,43025,1733152773677 2024-12-02T15:19:35,295 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34441 {}] master.ServerManager(363): Checking decommissioned status of RegionServer be0458f36726,35155,1733152773848 2024-12-02T15:19:35,295 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34441 {}] master.ServerManager(517): Registering regionserver=be0458f36726,35155,1733152773848 2024-12-02T15:19:35,295 DEBUG [RS:0;be0458f36726:43025 {}] 
regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657 2024-12-02T15:19:35,295 DEBUG [RS:0;be0458f36726:43025 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:36329 2024-12-02T15:19:35,295 DEBUG [RS:0;be0458f36726:43025 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-02T15:19:35,298 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34441 {}] master.ServerManager(363): Checking decommissioned status of RegionServer be0458f36726,45405,1733152773787 2024-12-02T15:19:35,298 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34441 {}] master.ServerManager(517): Registering regionserver=be0458f36726,45405,1733152773787 2024-12-02T15:19:35,298 DEBUG [RS:2;be0458f36726:35155 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657 2024-12-02T15:19:35,298 DEBUG [RS:2;be0458f36726:35155 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:36329 2024-12-02T15:19:35,298 DEBUG [RS:2;be0458f36726:35155 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-02T15:19:35,301 DEBUG [RS:1;be0458f36726:45405 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657 2024-12-02T15:19:35,301 DEBUG [RS:1;be0458f36726:45405 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:36329 2024-12-02T15:19:35,301 DEBUG [RS:1;be0458f36726:45405 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-02T15:19:35,303 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34441-0x10197ce54a90000, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-02T15:19:35,358 DEBUG [RS:0;be0458f36726:43025 {}] zookeeper.ZKUtil(111): regionserver:43025-0x10197ce54a90001, quorum=127.0.0.1:55609, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/be0458f36726,43025,1733152773677 2024-12-02T15:19:35,358 DEBUG [RS:2;be0458f36726:35155 {}] zookeeper.ZKUtil(111): regionserver:35155-0x10197ce54a90003, quorum=127.0.0.1:55609, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/be0458f36726,35155,1733152773848 2024-12-02T15:19:35,358 WARN [RS:0;be0458f36726:43025 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-02T15:19:35,358 DEBUG [RS:1;be0458f36726:45405 {}] zookeeper.ZKUtil(111): regionserver:45405-0x10197ce54a90002, quorum=127.0.0.1:55609, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/be0458f36726,45405,1733152773787 2024-12-02T15:19:35,358 WARN [RS:2;be0458f36726:35155 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
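[editor's note] The "Config from master" entries above show the master handing hbase.rootdir, fs.defaultFS and hbase.master.info.port to each registering region server. As a small sketch, assuming the standard HBaseConfiguration API, client-side code would read the same keys like this:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class ReadClusterConfigSketch {
  public static void main(String[] args) {
    // Loads hbase-default.xml / hbase-site.xml from the classpath.
    Configuration conf = HBaseConfiguration.create();
    // The same keys the master pushes to region servers in the entries above.
    String rootDir = conf.get("hbase.rootdir");
    String defaultFs = conf.get("fs.defaultFS");
    System.out.println("hbase.rootdir=" + rootDir + ", fs.defaultFS=" + defaultFs);
  }
}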
2024-12-02T15:19:35,358 INFO [RS:0;be0458f36726:43025 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-02T15:19:35,358 INFO [RS:2;be0458f36726:35155 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-02T15:19:35,359 DEBUG [RS:0;be0458f36726:43025 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/WALs/be0458f36726,43025,1733152773677 2024-12-02T15:19:35,359 DEBUG [RS:2;be0458f36726:35155 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/WALs/be0458f36726,35155,1733152773848 2024-12-02T15:19:35,358 WARN [RS:1;be0458f36726:45405 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-02T15:19:35,361 INFO [RS:1;be0458f36726:45405 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-02T15:19:35,361 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [be0458f36726,35155,1733152773848] 2024-12-02T15:19:35,361 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [be0458f36726,45405,1733152773787] 2024-12-02T15:19:35,361 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [be0458f36726,43025,1733152773677] 2024-12-02T15:19:35,361 DEBUG [RS:1;be0458f36726:45405 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/WALs/be0458f36726,45405,1733152773787 2024-12-02T15:19:35,393 INFO [RS:1;be0458f36726:45405 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-02T15:19:35,395 INFO [RS:0;be0458f36726:43025 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-02T15:19:35,396 INFO [RS:2;be0458f36726:35155 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-02T15:19:35,413 INFO [RS:1;be0458f36726:45405 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-02T15:19:35,423 INFO [RS:0;be0458f36726:43025 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-02T15:19:35,423 WARN [be0458f36726:34441 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-12-02T15:19:35,424 INFO [RS:2;be0458f36726:35155 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-02T15:19:35,432 INFO [RS:0;be0458f36726:43025 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-02T15:19:35,433 INFO [RS:0;be0458f36726:43025 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
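[editor's note] The entries above show each region server instantiating AsyncFSWALProvider for its WALs and sizing a global memstore limit of about 880 M. As a sketch only, and assuming the commonly documented configuration key names (verify them against the HBase version actually in use), those behaviours are driven by settings such as:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class WalAndMemstoreConfigSketch {
  public static Configuration build() {
    Configuration conf = HBaseConfiguration.create();
    // Assumed key names, shown for illustration only:
    conf.set("hbase.wal.provider", "asyncfs");                      // selects the AsyncFSWALProvider seen above
    conf.setFloat("hbase.regionserver.global.memstore.size", 0.4f); // heap fraction behind the global memstore limit logged
    return conf;
  }
}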
2024-12-02T15:19:35,434 INFO [RS:1;be0458f36726:45405 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-02T15:19:35,434 INFO [RS:1;be0458f36726:45405 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-02T15:19:35,435 INFO [RS:2;be0458f36726:35155 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-02T15:19:35,435 INFO [RS:2;be0458f36726:35155 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-02T15:19:35,437 INFO [RS:0;be0458f36726:43025 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-02T15:19:35,449 INFO [RS:1;be0458f36726:45405 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-02T15:19:35,455 INFO [RS:2;be0458f36726:35155 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-02T15:19:35,456 INFO [RS:1;be0458f36726:45405 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-02T15:19:35,457 INFO [RS:2;be0458f36726:35155 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-02T15:19:35,457 INFO [RS:0;be0458f36726:43025 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-02T15:19:35,458 INFO [RS:0;be0458f36726:43025 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-02T15:19:35,458 INFO [RS:2;be0458f36726:35155 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-02T15:19:35,458 INFO [RS:1;be0458f36726:45405 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
2024-12-02T15:19:35,458 DEBUG [RS:0;be0458f36726:43025 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/be0458f36726:0, corePoolSize=1, maxPoolSize=1 2024-12-02T15:19:35,458 DEBUG [RS:1;be0458f36726:45405 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/be0458f36726:0, corePoolSize=1, maxPoolSize=1 2024-12-02T15:19:35,458 DEBUG [RS:0;be0458f36726:43025 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/be0458f36726:0, corePoolSize=1, maxPoolSize=1 2024-12-02T15:19:35,458 DEBUG [RS:1;be0458f36726:45405 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/be0458f36726:0, corePoolSize=1, maxPoolSize=1 2024-12-02T15:19:35,458 DEBUG [RS:2;be0458f36726:35155 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/be0458f36726:0, corePoolSize=1, maxPoolSize=1 2024-12-02T15:19:35,458 DEBUG [RS:0;be0458f36726:43025 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/be0458f36726:0, corePoolSize=1, maxPoolSize=1 2024-12-02T15:19:35,458 DEBUG [RS:1;be0458f36726:45405 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/be0458f36726:0, corePoolSize=1, maxPoolSize=1 2024-12-02T15:19:35,459 DEBUG [RS:2;be0458f36726:35155 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/be0458f36726:0, corePoolSize=1, maxPoolSize=1 2024-12-02T15:19:35,459 DEBUG [RS:0;be0458f36726:43025 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/be0458f36726:0, corePoolSize=1, maxPoolSize=1 2024-12-02T15:19:35,459 DEBUG [RS:1;be0458f36726:45405 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/be0458f36726:0, corePoolSize=1, maxPoolSize=1 2024-12-02T15:19:35,459 DEBUG [RS:2;be0458f36726:35155 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/be0458f36726:0, corePoolSize=1, maxPoolSize=1 2024-12-02T15:19:35,459 DEBUG [RS:0;be0458f36726:43025 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/be0458f36726:0, corePoolSize=1, maxPoolSize=1 2024-12-02T15:19:35,459 DEBUG [RS:1;be0458f36726:45405 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/be0458f36726:0, corePoolSize=1, maxPoolSize=1 2024-12-02T15:19:35,459 DEBUG [RS:2;be0458f36726:35155 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/be0458f36726:0, corePoolSize=1, maxPoolSize=1 2024-12-02T15:19:35,459 DEBUG [RS:0;be0458f36726:43025 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/be0458f36726:0, corePoolSize=2, maxPoolSize=2 2024-12-02T15:19:35,459 DEBUG [RS:1;be0458f36726:45405 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/be0458f36726:0, corePoolSize=2, maxPoolSize=2 2024-12-02T15:19:35,459 DEBUG [RS:2;be0458f36726:35155 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/be0458f36726:0, corePoolSize=1, maxPoolSize=1 2024-12-02T15:19:35,459 DEBUG [RS:0;be0458f36726:43025 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/be0458f36726:0, corePoolSize=1, maxPoolSize=1 
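[editor's note] Each "Starting executor service ... corePoolSize=N, maxPoolSize=N" entry above corresponds to a fixed-size worker pool for one category of region server work. The sketch below uses plain java.util.concurrent to show what equal core and max sizes mean; it is an analogy for the semantics, not HBase's own executor.ExecutorService class:

import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class FixedPoolSketch {
  public static ThreadPoolExecutor newFixedPool(int size) {
    // corePoolSize == maxPoolSize, as in the RS_OPEN_REGION / RS_CLOSE_REGION pools above:
    // the pool never grows past 'size' threads, and excess work waits in the queue.
    return new ThreadPoolExecutor(size, size, 60L, TimeUnit.SECONDS, new LinkedBlockingQueue<>());
  }
}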
2024-12-02T15:19:35,459 DEBUG [RS:2;be0458f36726:35155 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/be0458f36726:0, corePoolSize=2, maxPoolSize=2 2024-12-02T15:19:35,459 DEBUG [RS:1;be0458f36726:45405 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/be0458f36726:0, corePoolSize=1, maxPoolSize=1 2024-12-02T15:19:35,459 DEBUG [RS:0;be0458f36726:43025 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/be0458f36726:0, corePoolSize=1, maxPoolSize=1 2024-12-02T15:19:35,459 DEBUG [RS:2;be0458f36726:35155 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/be0458f36726:0, corePoolSize=1, maxPoolSize=1 2024-12-02T15:19:35,459 DEBUG [RS:1;be0458f36726:45405 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/be0458f36726:0, corePoolSize=1, maxPoolSize=1 2024-12-02T15:19:35,459 DEBUG [RS:0;be0458f36726:43025 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/be0458f36726:0, corePoolSize=1, maxPoolSize=1 2024-12-02T15:19:35,459 DEBUG [RS:2;be0458f36726:35155 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/be0458f36726:0, corePoolSize=1, maxPoolSize=1 2024-12-02T15:19:35,459 DEBUG [RS:1;be0458f36726:45405 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/be0458f36726:0, corePoolSize=1, maxPoolSize=1 2024-12-02T15:19:35,459 DEBUG [RS:2;be0458f36726:35155 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/be0458f36726:0, corePoolSize=1, maxPoolSize=1 2024-12-02T15:19:35,459 DEBUG [RS:0;be0458f36726:43025 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/be0458f36726:0, corePoolSize=1, maxPoolSize=1 2024-12-02T15:19:35,459 DEBUG [RS:1;be0458f36726:45405 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/be0458f36726:0, corePoolSize=1, maxPoolSize=1 2024-12-02T15:19:35,460 DEBUG [RS:2;be0458f36726:35155 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/be0458f36726:0, corePoolSize=1, maxPoolSize=1 2024-12-02T15:19:35,460 DEBUG [RS:0;be0458f36726:43025 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/be0458f36726:0, corePoolSize=1, maxPoolSize=1 2024-12-02T15:19:35,460 DEBUG [RS:1;be0458f36726:45405 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/be0458f36726:0, corePoolSize=1, maxPoolSize=1 2024-12-02T15:19:35,460 DEBUG [RS:0;be0458f36726:43025 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/be0458f36726:0, corePoolSize=1, maxPoolSize=1 2024-12-02T15:19:35,460 DEBUG [RS:2;be0458f36726:35155 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/be0458f36726:0, corePoolSize=1, maxPoolSize=1 2024-12-02T15:19:35,460 DEBUG [RS:1;be0458f36726:45405 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/be0458f36726:0, corePoolSize=1, maxPoolSize=1 2024-12-02T15:19:35,460 DEBUG [RS:2;be0458f36726:35155 {}] 
executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/be0458f36726:0, corePoolSize=1, maxPoolSize=1 2024-12-02T15:19:35,460 DEBUG [RS:0;be0458f36726:43025 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0, corePoolSize=3, maxPoolSize=3 2024-12-02T15:19:35,460 DEBUG [RS:1;be0458f36726:45405 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0, corePoolSize=3, maxPoolSize=3 2024-12-02T15:19:35,460 DEBUG [RS:2;be0458f36726:35155 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0, corePoolSize=3, maxPoolSize=3 2024-12-02T15:19:35,460 DEBUG [RS:0;be0458f36726:43025 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/be0458f36726:0, corePoolSize=3, maxPoolSize=3 2024-12-02T15:19:35,460 DEBUG [RS:1;be0458f36726:45405 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/be0458f36726:0, corePoolSize=3, maxPoolSize=3 2024-12-02T15:19:35,460 DEBUG [RS:2;be0458f36726:35155 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/be0458f36726:0, corePoolSize=3, maxPoolSize=3 2024-12-02T15:19:35,474 INFO [RS:1;be0458f36726:45405 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-02T15:19:35,475 INFO [RS:1;be0458f36726:45405 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-02T15:19:35,475 INFO [RS:1;be0458f36726:45405 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-02T15:19:35,475 INFO [RS:1;be0458f36726:45405 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-02T15:19:35,475 INFO [RS:1;be0458f36726:45405 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-02T15:19:35,475 INFO [RS:1;be0458f36726:45405 {}] hbase.ChoreService(168): Chore ScheduledChore name=be0458f36726,45405,1733152773787-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-02T15:19:35,531 INFO [RS:0;be0458f36726:43025 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-02T15:19:35,531 INFO [RS:0;be0458f36726:43025 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-02T15:19:35,531 INFO [RS:0;be0458f36726:43025 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-02T15:19:35,531 INFO [RS:0;be0458f36726:43025 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-02T15:19:35,532 INFO [RS:0;be0458f36726:43025 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-02T15:19:35,532 INFO [RS:0;be0458f36726:43025 {}] hbase.ChoreService(168): Chore ScheduledChore name=be0458f36726,43025,1733152773677-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 
2024-12-02T15:19:35,538 INFO [RS:2;be0458f36726:35155 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-02T15:19:35,538 INFO [RS:2;be0458f36726:35155 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-02T15:19:35,538 INFO [RS:2;be0458f36726:35155 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-02T15:19:35,538 INFO [RS:2;be0458f36726:35155 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-02T15:19:35,538 INFO [RS:2;be0458f36726:35155 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-02T15:19:35,538 INFO [RS:1;be0458f36726:45405 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-02T15:19:35,538 INFO [RS:2;be0458f36726:35155 {}] hbase.ChoreService(168): Chore ScheduledChore name=be0458f36726,35155,1733152773848-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-02T15:19:35,541 INFO [RS:1;be0458f36726:45405 {}] hbase.ChoreService(168): Chore ScheduledChore name=be0458f36726,45405,1733152773787-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-02T15:19:35,542 INFO [RS:1;be0458f36726:45405 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-02T15:19:35,542 INFO [RS:1;be0458f36726:45405 {}] regionserver.Replication(171): be0458f36726,45405,1733152773787 started 2024-12-02T15:19:35,554 INFO [RS:0;be0458f36726:43025 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-02T15:19:35,554 INFO [RS:0;be0458f36726:43025 {}] hbase.ChoreService(168): Chore ScheduledChore name=be0458f36726,43025,1733152773677-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-02T15:19:35,555 INFO [RS:0;be0458f36726:43025 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-02T15:19:35,555 INFO [RS:0;be0458f36726:43025 {}] regionserver.Replication(171): be0458f36726,43025,1733152773677 started 2024-12-02T15:19:35,567 INFO [RS:2;be0458f36726:35155 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-02T15:19:35,567 INFO [RS:2;be0458f36726:35155 {}] hbase.ChoreService(168): Chore ScheduledChore name=be0458f36726,35155,1733152773848-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-02T15:19:35,568 INFO [RS:2;be0458f36726:35155 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-02T15:19:35,568 INFO [RS:2;be0458f36726:35155 {}] regionserver.Replication(171): be0458f36726,35155,1733152773848 started 2024-12-02T15:19:35,584 INFO [RS:1;be0458f36726:45405 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
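[editor's note] The ChoreService entries above register periodic background tasks: CompactionChecker and MemstoreFlusherChore every 1000 ms, ExecutorStatusChore and the HeapMemoryTunerChore every 60 s, and so on. The sketch below shows the same periodic-task pattern with a plain ScheduledExecutorService; it is an analogy, not HBase's ChoreService/ScheduledChore API:

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

public class PeriodicChoreSketch {
  public static void main(String[] args) {
    ScheduledExecutorService scheduler = Executors.newSingleThreadScheduledExecutor();
    // Analogous to "ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS":
    scheduler.scheduleAtFixedRate(
        () -> System.out.println("check stores for compaction work"),
        1000, 1000, TimeUnit.MILLISECONDS);
  }
}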
2024-12-02T15:19:35,585 INFO [RS:1;be0458f36726:45405 {}] regionserver.HRegionServer(1482): Serving as be0458f36726,45405,1733152773787, RpcServer on be0458f36726/172.17.0.2:45405, sessionid=0x10197ce54a90002 2024-12-02T15:19:35,586 INFO [RS:0;be0458f36726:43025 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-02T15:19:35,586 DEBUG [RS:1;be0458f36726:45405 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-02T15:19:35,586 INFO [RS:0;be0458f36726:43025 {}] regionserver.HRegionServer(1482): Serving as be0458f36726,43025,1733152773677, RpcServer on be0458f36726/172.17.0.2:43025, sessionid=0x10197ce54a90001 2024-12-02T15:19:35,586 DEBUG [RS:1;be0458f36726:45405 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager be0458f36726,45405,1733152773787 2024-12-02T15:19:35,587 DEBUG [RS:0;be0458f36726:43025 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-02T15:19:35,587 DEBUG [RS:0;be0458f36726:43025 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager be0458f36726,43025,1733152773677 2024-12-02T15:19:35,587 DEBUG [RS:1;be0458f36726:45405 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'be0458f36726,45405,1733152773787' 2024-12-02T15:19:35,587 DEBUG [RS:0;be0458f36726:43025 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'be0458f36726,43025,1733152773677' 2024-12-02T15:19:35,587 DEBUG [RS:1;be0458f36726:45405 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-02T15:19:35,587 DEBUG [RS:0;be0458f36726:43025 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-02T15:19:35,588 DEBUG [RS:0;be0458f36726:43025 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-02T15:19:35,589 DEBUG [RS:0;be0458f36726:43025 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-02T15:19:35,589 DEBUG [RS:0;be0458f36726:43025 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-02T15:19:35,590 DEBUG [RS:0;be0458f36726:43025 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager be0458f36726,43025,1733152773677 2024-12-02T15:19:35,590 INFO [RS:2;be0458f36726:35155 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-12-02T15:19:35,590 DEBUG [RS:0;be0458f36726:43025 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'be0458f36726,43025,1733152773677' 2024-12-02T15:19:35,590 DEBUG [RS:0;be0458f36726:43025 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-02T15:19:35,591 INFO [RS:2;be0458f36726:35155 {}] regionserver.HRegionServer(1482): Serving as be0458f36726,35155,1733152773848, RpcServer on be0458f36726/172.17.0.2:35155, sessionid=0x10197ce54a90003 2024-12-02T15:19:35,591 DEBUG [RS:0;be0458f36726:43025 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-02T15:19:35,592 DEBUG [RS:1;be0458f36726:45405 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-02T15:19:35,592 DEBUG [RS:2;be0458f36726:35155 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-02T15:19:35,592 DEBUG [RS:0;be0458f36726:43025 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-02T15:19:35,592 DEBUG [RS:2;be0458f36726:35155 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager be0458f36726,35155,1733152773848 2024-12-02T15:19:35,592 DEBUG [RS:2;be0458f36726:35155 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'be0458f36726,35155,1733152773848' 2024-12-02T15:19:35,592 INFO [RS:0;be0458f36726:43025 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-02T15:19:35,592 DEBUG [RS:2;be0458f36726:35155 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-02T15:19:35,592 INFO [RS:0;be0458f36726:43025 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 
2024-12-02T15:19:35,593 DEBUG [RS:2;be0458f36726:35155 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-02T15:19:35,593 DEBUG [RS:1;be0458f36726:45405 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-02T15:19:35,593 DEBUG [RS:1;be0458f36726:45405 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-02T15:19:35,593 DEBUG [RS:1;be0458f36726:45405 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager be0458f36726,45405,1733152773787 2024-12-02T15:19:35,593 DEBUG [RS:1;be0458f36726:45405 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'be0458f36726,45405,1733152773787' 2024-12-02T15:19:35,593 DEBUG [RS:1;be0458f36726:45405 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-02T15:19:35,596 DEBUG [RS:2;be0458f36726:35155 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-02T15:19:35,597 DEBUG [RS:2;be0458f36726:35155 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-02T15:19:35,597 DEBUG [RS:2;be0458f36726:35155 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager be0458f36726,35155,1733152773848 2024-12-02T15:19:35,597 DEBUG [RS:2;be0458f36726:35155 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'be0458f36726,35155,1733152773848' 2024-12-02T15:19:35,597 DEBUG [RS:2;be0458f36726:35155 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-02T15:19:35,598 DEBUG [RS:2;be0458f36726:35155 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-02T15:19:35,599 DEBUG [RS:2;be0458f36726:35155 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-02T15:19:35,599 INFO [RS:2;be0458f36726:35155 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-02T15:19:35,600 INFO [RS:2;be0458f36726:35155 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-02T15:19:35,602 DEBUG [RS:1;be0458f36726:45405 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-02T15:19:35,604 DEBUG [RS:1;be0458f36726:45405 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-02T15:19:35,604 INFO [RS:1;be0458f36726:45405 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-02T15:19:35,604 INFO [RS:1;be0458f36726:45405 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 
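[editor's note] Every region server above logs "Quota support disabled", so neither the RPC quota manager nor the space quota manager is started in this run. Assuming the standard quota switch key (hbase.quota.enabled; verify against the version in use), turning quotas on would look roughly like this sketch:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class EnableQuotasSketch {
  public static Configuration build() {
    Configuration conf = HBaseConfiguration.create();
    // Assumed key name; with it unset or false, the servers log "Quota support disabled" as above.
    conf.setBoolean("hbase.quota.enabled", true);
    return conf;
  }
}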
2024-12-02T15:19:35,698 INFO [RS:0;be0458f36726:43025 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-02T15:19:35,701 INFO [RS:2;be0458f36726:35155 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-02T15:19:35,702 INFO [RS:0;be0458f36726:43025 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=be0458f36726%2C43025%2C1733152773677, suffix=, logDir=hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/WALs/be0458f36726,43025,1733152773677, archiveDir=hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/oldWALs, maxLogs=32 2024-12-02T15:19:35,705 INFO [RS:1;be0458f36726:45405 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-02T15:19:35,706 INFO [RS:2;be0458f36726:35155 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=be0458f36726%2C35155%2C1733152773848, suffix=, logDir=hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/WALs/be0458f36726,35155,1733152773848, archiveDir=hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/oldWALs, maxLogs=32 2024-12-02T15:19:35,711 INFO [RS:1;be0458f36726:45405 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=be0458f36726%2C45405%2C1733152773787, suffix=, logDir=hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/WALs/be0458f36726,45405,1733152773787, archiveDir=hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/oldWALs, maxLogs=32 2024-12-02T15:19:35,731 DEBUG [RS:0;be0458f36726:43025 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/WALs/be0458f36726,43025,1733152773677/be0458f36726%2C43025%2C1733152773677.1733152775706, exclude list is [], retry=0 2024-12-02T15:19:35,737 DEBUG [RS:2;be0458f36726:35155 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/WALs/be0458f36726,35155,1733152773848/be0458f36726%2C35155%2C1733152773848.1733152775711, exclude list is [], retry=0 2024-12-02T15:19:35,740 DEBUG [RS:1;be0458f36726:45405 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/WALs/be0458f36726,45405,1733152773787/be0458f36726%2C45405%2C1733152773787.1733152775714, exclude list is [], retry=0 2024-12-02T15:19:35,746 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:45623,DS-e14bf018-ab4b-4ae5-81ba-6ab8b232d871,DISK] 2024-12-02T15:19:35,746 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:32893,DS-1e7feb52-ac3c-4679-b23f-d54b551b677f,DISK] 2024-12-02T15:19:35,747 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = 
DatanodeInfoWithStorage[127.0.0.1:41939,DS-495b90e5-2933-43da-86b7-15d032db6564,DISK] 2024-12-02T15:19:35,747 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:45623,DS-e14bf018-ab4b-4ae5-81ba-6ab8b232d871,DISK] 2024-12-02T15:19:35,748 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:41939,DS-495b90e5-2933-43da-86b7-15d032db6564,DISK] 2024-12-02T15:19:35,748 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:32893,DS-1e7feb52-ac3c-4679-b23f-d54b551b677f,DISK] 2024-12-02T15:19:35,749 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:45623,DS-e14bf018-ab4b-4ae5-81ba-6ab8b232d871,DISK] 2024-12-02T15:19:35,749 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:41939,DS-495b90e5-2933-43da-86b7-15d032db6564,DISK] 2024-12-02T15:19:35,750 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:32893,DS-1e7feb52-ac3c-4679-b23f-d54b551b677f,DISK] 2024-12-02T15:19:35,797 INFO [RS:2;be0458f36726:35155 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/WALs/be0458f36726,35155,1733152773848/be0458f36726%2C35155%2C1733152773848.1733152775711 2024-12-02T15:19:35,811 DEBUG [RS:2;be0458f36726:35155 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:38521:38521),(127.0.0.1/127.0.0.1:34583:34583),(127.0.0.1/127.0.0.1:36149:36149)] 2024-12-02T15:19:35,819 INFO [RS:0;be0458f36726:43025 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/WALs/be0458f36726,43025,1733152773677/be0458f36726%2C43025%2C1733152773677.1733152775706 2024-12-02T15:19:35,845 DEBUG [RS:0;be0458f36726:43025 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:38521:38521),(127.0.0.1/127.0.0.1:34583:34583),(127.0.0.1/127.0.0.1:36149:36149)] 2024-12-02T15:19:35,846 INFO [RS:1;be0458f36726:45405 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/WALs/be0458f36726,45405,1733152773787/be0458f36726%2C45405%2C1733152773787.1733152775714 2024-12-02T15:19:35,847 DEBUG [RS:1;be0458f36726:45405 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:34583:34583),(127.0.0.1/127.0.0.1:38521:38521),(127.0.0.1/127.0.0.1:36149:36149)] 2024-12-02T15:19:35,927 DEBUG [be0458f36726:34441 {}] assignment.AssignmentManager(2472): Processing 
assignQueue; systemServersCount=3, allServersCount=3 2024-12-02T15:19:35,939 DEBUG [be0458f36726:34441 {}] balancer.BalancerClusterState(204): Hosts are {be0458f36726=0} racks are {/default-rack=0} 2024-12-02T15:19:35,951 DEBUG [be0458f36726:34441 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-02T15:19:35,951 DEBUG [be0458f36726:34441 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-02T15:19:35,951 DEBUG [be0458f36726:34441 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-02T15:19:35,952 DEBUG [be0458f36726:34441 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-02T15:19:35,952 DEBUG [be0458f36726:34441 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-02T15:19:35,952 DEBUG [be0458f36726:34441 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-02T15:19:35,952 INFO [be0458f36726:34441 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-02T15:19:35,952 INFO [be0458f36726:34441 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-02T15:19:35,952 INFO [be0458f36726:34441 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-02T15:19:35,952 DEBUG [be0458f36726:34441 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-02T15:19:35,972 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=be0458f36726,45405,1733152773787 2024-12-02T15:19:35,980 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as be0458f36726,45405,1733152773787, state=OPENING 2024-12-02T15:19:36,011 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-02T15:19:36,020 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35155-0x10197ce54a90003, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T15:19:36,020 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43025-0x10197ce54a90001, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T15:19:36,020 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34441-0x10197ce54a90000, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T15:19:36,020 DEBUG [pool-69-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45405-0x10197ce54a90002, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T15:19:36,023 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-02T15:19:36,023 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-02T15:19:36,023 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-02T15:19:36,023 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, 
state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-02T15:19:36,024 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-02T15:19:36,026 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=be0458f36726,45405,1733152773787}] 2024-12-02T15:19:36,206 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-02T15:19:36,208 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50855, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-02T15:19:36,220 INFO [RS_OPEN_META-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-02T15:19:36,220 INFO [RS_OPEN_META-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-02T15:19:36,221 INFO [RS_OPEN_META-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor .meta 2024-12-02T15:19:36,224 INFO [RS_OPEN_META-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=be0458f36726%2C45405%2C1733152773787.meta, suffix=.meta, logDir=hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/WALs/be0458f36726,45405,1733152773787, archiveDir=hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/oldWALs, maxLogs=32 2024-12-02T15:19:36,237 DEBUG [RS_OPEN_META-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_META, pid=3}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/WALs/be0458f36726,45405,1733152773787/be0458f36726%2C45405%2C1733152773787.meta.1733152776225.meta, exclude list is [], retry=0 2024-12-02T15:19:36,243 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:41939,DS-495b90e5-2933-43da-86b7-15d032db6564,DISK] 2024-12-02T15:19:36,243 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:45623,DS-e14bf018-ab4b-4ae5-81ba-6ab8b232d871,DISK] 2024-12-02T15:19:36,243 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:32893,DS-1e7feb52-ac3c-4679-b23f-d54b551b677f,DISK] 2024-12-02T15:19:36,249 INFO [RS_OPEN_META-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL 
/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/WALs/be0458f36726,45405,1733152773787/be0458f36726%2C45405%2C1733152773787.meta.1733152776225.meta 2024-12-02T15:19:36,249 DEBUG [RS_OPEN_META-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:34583:34583),(127.0.0.1/127.0.0.1:36149:36149),(127.0.0.1/127.0.0.1:38521:38521)] 2024-12-02T15:19:36,250 DEBUG [RS_OPEN_META-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-02T15:19:36,252 DEBUG [RS_OPEN_META-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=AccessControlService 2024-12-02T15:19:36,253 INFO [RS_OPEN_META-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-02T15:19:36,254 DEBUG [RS_OPEN_META-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-02T15:19:36,256 DEBUG [RS_OPEN_META-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-02T15:19:36,257 INFO [RS_OPEN_META-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-12-02T15:19:36,265 DEBUG [RS_OPEN_META-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-02T15:19:36,265 DEBUG [RS_OPEN_META-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T15:19:36,265 DEBUG [RS_OPEN_META-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-02T15:19:36,266 DEBUG [RS_OPEN_META-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-02T15:19:36,269 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-02T15:19:36,271 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-02T15:19:36,271 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T15:19:36,272 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-02T15:19:36,272 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-02T15:19:36,274 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-02T15:19:36,274 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T15:19:36,275 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-02T15:19:36,275 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-02T15:19:36,277 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-02T15:19:36,277 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T15:19:36,278 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-02T15:19:36,278 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-02T15:19:36,280 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-02T15:19:36,280 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T15:19:36,281 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-12-02T15:19:36,281 DEBUG [RS_OPEN_META-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-02T15:19:36,283 DEBUG [RS_OPEN_META-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/hbase/meta/1588230740 2024-12-02T15:19:36,287 DEBUG [RS_OPEN_META-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/hbase/meta/1588230740 2024-12-02T15:19:36,290 DEBUG [RS_OPEN_META-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-02T15:19:36,290 DEBUG [RS_OPEN_META-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-02T15:19:36,291 DEBUG [RS_OPEN_META-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-02T15:19:36,295 DEBUG [RS_OPEN_META-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-02T15:19:36,297 INFO [RS_OPEN_META-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=71630367, jitterRate=0.06737564504146576}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-02T15:19:36,297 DEBUG [RS_OPEN_META-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-02T15:19:36,300 DEBUG [RS_OPEN_META-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733152776266Writing region info on filesystem at 1733152776266Initializing all the Stores at 1733152776268 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733152776268Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733152776269 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733152776269Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733152776269Cleaning up temporary data from old regions at 1733152776290 (+21 ms)Running coprocessor post-open hooks at 1733152776297 (+7 ms)Region opened successfully at 1733152776300 (+3 ms) 2024-12-02T15:19:36,306 INFO [RS_OPEN_META-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733152776198 2024-12-02T15:19:36,317 DEBUG [RS_OPEN_META-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-02T15:19:36,318 INFO [RS_OPEN_META-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-02T15:19:36,319 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=be0458f36726,45405,1733152773787 2024-12-02T15:19:36,321 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as be0458f36726,45405,1733152773787, state=OPEN 2024-12-02T15:19:36,328 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34441-0x10197ce54a90000, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-02T15:19:36,328 DEBUG [pool-69-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45405-0x10197ce54a90002, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-02T15:19:36,328 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43025-0x10197ce54a90001, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-02T15:19:36,328 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35155-0x10197ce54a90003, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-02T15:19:36,328 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-02T15:19:36,328 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-02T15:19:36,328 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-02T15:19:36,328 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-02T15:19:36,329 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, 
hasLock=true; OpenRegionProcedure 1588230740, server=be0458f36726,45405,1733152773787 2024-12-02T15:19:36,336 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-02T15:19:36,336 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=be0458f36726,45405,1733152773787 in 303 msec 2024-12-02T15:19:36,346 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-02T15:19:36,347 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 1.0790 sec 2024-12-02T15:19:36,351 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-02T15:19:36,351 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-02T15:19:36,374 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-02T15:19:36,376 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=be0458f36726,45405,1733152773787, seqNum=-1] 2024-12-02T15:19:36,404 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T15:19:36,407 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38873, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T15:19:36,438 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 1.4540 sec 2024-12-02T15:19:36,439 INFO [master/be0458f36726:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733152776438, completionTime=-1 2024-12-02T15:19:36,444 INFO [master/be0458f36726:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=3; waited=0ms, expected min=3 server(s), max=3 server(s), master is running 2024-12-02T15:19:36,444 DEBUG [master/be0458f36726:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 
2024-12-02T15:19:36,478 INFO [master/be0458f36726:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=3 2024-12-02T15:19:36,478 INFO [master/be0458f36726:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733152836478 2024-12-02T15:19:36,479 INFO [master/be0458f36726:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733152896479 2024-12-02T15:19:36,479 INFO [master/be0458f36726:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 34 msec 2024-12-02T15:19:36,480 DEBUG [master/be0458f36726:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 1588230740 changed from -1.0 to 0.0, refreshing cache 2024-12-02T15:19:36,489 INFO [master/be0458f36726:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=be0458f36726,34441,1733152772730-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-02T15:19:36,489 INFO [master/be0458f36726:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=be0458f36726,34441,1733152772730-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-02T15:19:36,489 INFO [master/be0458f36726:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=be0458f36726,34441,1733152772730-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-02T15:19:36,492 INFO [master/be0458f36726:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-be0458f36726:34441, period=300000, unit=MILLISECONDS is enabled. 2024-12-02T15:19:36,496 INFO [master/be0458f36726:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-02T15:19:36,503 INFO [master/be0458f36726:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-12-02T15:19:36,508 DEBUG [master/be0458f36726:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-02T15:19:36,542 INFO [master/be0458f36726:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 2.553sec 2024-12-02T15:19:36,546 INFO [master/be0458f36726:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-02T15:19:36,548 INFO [master/be0458f36726:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-02T15:19:36,577 INFO [master/be0458f36726:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-02T15:19:36,578 INFO [master/be0458f36726:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
2024-12-02T15:19:36,578 INFO [master/be0458f36726:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-02T15:19:36,580 INFO [master/be0458f36726:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=be0458f36726,34441,1733152772730-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-02T15:19:36,583 INFO [master/be0458f36726:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=be0458f36726,34441,1733152772730-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-02T15:19:36,629 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6b88e3fd, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T15:19:36,641 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-12-02T15:19:36,641 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-12-02T15:19:36,647 DEBUG [master/be0458f36726:0:becomeActiveMaster {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-02T15:19:36,648 DEBUG [master/be0458f36726:0:becomeActiveMaster {}] client.AsyncConnectionImpl(321): The fetched master address is be0458f36726,34441,1733152772730 2024-12-02T15:19:36,652 DEBUG [master/be0458f36726:0:becomeActiveMaster {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@6b7bf70f 2024-12-02T15:19:36,652 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request be0458f36726,34441,-1 for getting cluster id 2024-12-02T15:19:36,653 DEBUG [master/be0458f36726:0:becomeActiveMaster {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-02T15:19:36,655 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-02T15:19:36,655 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50891, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-02T15:19:36,666 INFO [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34441 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'hbase:acl', {NAME => 'l', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-02T15:19:36,670 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '8e50677f-f1c7-447a-8806-ffbc6b162076' 2024-12-02T15:19:36,674 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-02T15:19:36,674 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "8e50677f-f1c7-447a-8806-ffbc6b162076" 2024-12-02T15:19:36,678 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3c202db0, compressor=null, tcpKeepAlive=true, 
tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T15:19:36,678 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [be0458f36726,34441,-1] 2024-12-02T15:19:36,680 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34441 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=hbase:acl 2024-12-02T15:19:36,683 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-02T15:19:36,684 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=hbase:acl execute state=CREATE_TABLE_PRE_OPERATION 2024-12-02T15:19:36,685 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T15:19:36,688 INFO [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34441 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "hbase" qualifier: "acl" procId is: 4 2024-12-02T15:19:36,692 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=hbase:acl execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-02T15:19:36,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-02T15:19:36,701 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T15:19:36,702 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46218, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-02T15:19:36,706 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@9459ec4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T15:19:36,706 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-02T15:19:36,719 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=be0458f36726,45405,1733152773787, seqNum=-1] 2024-12-02T15:19:36,719 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T15:19:36,724 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33686, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T15:19:36,728 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=be0458f36726,34441,1733152772730 2024-12-02T15:19:36,728 INFO [Time-limited test {}] hbase.HBaseTestingUtil(2305): Starting mini mapreduce cluster... 
2024-12-02T15:19:36,728 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/44e720fa-d04f-d823-6564-55fab94a4e60/test.cache.data in system properties and HBase conf 2024-12-02T15:19:36,728 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/44e720fa-d04f-d823-6564-55fab94a4e60/hadoop.tmp.dir in system properties and HBase conf 2024-12-02T15:19:36,728 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/44e720fa-d04f-d823-6564-55fab94a4e60/hadoop.log.dir in system properties and HBase conf 2024-12-02T15:19:36,728 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/44e720fa-d04f-d823-6564-55fab94a4e60/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-02T15:19:36,728 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/44e720fa-d04f-d823-6564-55fab94a4e60/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-02T15:19:36,729 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-12-02T15:19:36,729 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/44e720fa-d04f-d823-6564-55fab94a4e60/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-02T15:19:36,729 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/44e720fa-d04f-d823-6564-55fab94a4e60/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-02T15:19:36,729 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/44e720fa-d04f-d823-6564-55fab94a4e60/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-02T15:19:36,729 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/44e720fa-d04f-d823-6564-55fab94a4e60/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-02T15:19:36,729 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/44e720fa-d04f-d823-6564-55fab94a4e60/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-02T15:19:36,729 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/44e720fa-d04f-d823-6564-55fab94a4e60/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-02T15:19:36,729 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/44e720fa-d04f-d823-6564-55fab94a4e60/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-02T15:19:36,729 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/44e720fa-d04f-d823-6564-55fab94a4e60/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-02T15:19:36,729 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/44e720fa-d04f-d823-6564-55fab94a4e60/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-02T15:19:36,729 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/44e720fa-d04f-d823-6564-55fab94a4e60/nfs.dump.dir in system properties and HBase conf 2024-12-02T15:19:36,730 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/44e720fa-d04f-d823-6564-55fab94a4e60/java.io.tmpdir in system properties and HBase conf 2024-12-02T15:19:36,730 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/44e720fa-d04f-d823-6564-55fab94a4e60/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-02T15:19:36,730 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/44e720fa-d04f-d823-6564-55fab94a4e60/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-02T15:19:36,730 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/44e720fa-d04f-d823-6564-55fab94a4e60/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-02T15:19:36,750 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073741837_1013 (size=349) 2024-12-02T15:19:36,751 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073741837_1013 (size=349) 2024-12-02T15:19:36,752 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073741837_1013 (size=349) 2024-12-02T15:19:36,755 INFO [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 82c95700d9ff2efea2507bd84223c4b0, NAME => 'hbase:acl,,1733152776657.82c95700d9ff2efea2507bd84223c4b0.', STARTKEY => '', 
ENDKEY => ''}, tableDescriptor='hbase:acl', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'l', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657 2024-12-02T15:19:36,791 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073741838_1014 (size=36) 2024-12-02T15:19:36,791 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073741838_1014 (size=36) 2024-12-02T15:19:36,800 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073741838_1014 (size=36) 2024-12-02T15:19:36,802 DEBUG [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(898): Instantiated hbase:acl,,1733152776657.82c95700d9ff2efea2507bd84223c4b0.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T15:19:36,806 DEBUG [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1722): Closing 82c95700d9ff2efea2507bd84223c4b0, disabling compactions & flushes 2024-12-02T15:19:36,806 INFO [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1755): Closing region hbase:acl,,1733152776657.82c95700d9ff2efea2507bd84223c4b0. 2024-12-02T15:19:36,806 DEBUG [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:acl,,1733152776657.82c95700d9ff2efea2507bd84223c4b0. 2024-12-02T15:19:36,806 DEBUG [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on hbase:acl,,1733152776657.82c95700d9ff2efea2507bd84223c4b0. after waiting 0 ms 2024-12-02T15:19:36,806 DEBUG [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region hbase:acl,,1733152776657.82c95700d9ff2efea2507bd84223c4b0. 2024-12-02T15:19:36,806 INFO [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1973): Closed hbase:acl,,1733152776657.82c95700d9ff2efea2507bd84223c4b0. 2024-12-02T15:19:36,806 DEBUG [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1676): Region close journal for 82c95700d9ff2efea2507bd84223c4b0: Waiting for close lock at 1733152776806Disabling compacts and flushes for region at 1733152776806Disabling writes for close at 1733152776806Writing region close event to WAL at 1733152776806Closed at 1733152776806 2024-12-02T15:19:36,809 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=hbase:acl execute state=CREATE_TABLE_ADD_TO_META 2024-12-02T15:19:36,817 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"hbase:acl,,1733152776657.82c95700d9ff2efea2507bd84223c4b0.","families":{"info":[{"qualifier":"regioninfo","vlen":35,"tag":[],"timestamp":"1733152776811"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733152776811"}]},"ts":"1733152776811"} 2024-12-02T15:19:36,823 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-12-02T15:19:36,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-02T15:19:36,827 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=hbase:acl execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-02T15:19:36,832 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"hbase:acl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733152776827"}]},"ts":"1733152776827"} 2024-12-02T15:19:36,838 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=hbase:acl, state=ENABLING in hbase:meta 2024-12-02T15:19:36,839 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(204): Hosts are {be0458f36726=0} racks are {/default-rack=0} 2024-12-02T15:19:36,841 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-02T15:19:36,841 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-02T15:19:36,841 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-02T15:19:36,841 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-02T15:19:36,841 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-02T15:19:36,841 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-02T15:19:36,841 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-02T15:19:36,841 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-02T15:19:36,842 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-02T15:19:36,842 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-02T15:19:36,843 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073741839_1015 (size=592039) 2024-12-02T15:19:36,845 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073741839_1015 (size=592039) 2024-12-02T15:19:36,845 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073741839_1015 (size=592039) 2024-12-02T15:19:36,845 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:acl, region=82c95700d9ff2efea2507bd84223c4b0, ASSIGN}] 2024-12-02T15:19:36,858 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:acl, region=82c95700d9ff2efea2507bd84223c4b0, ASSIGN 2024-12-02T15:19:36,862 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:acl, region=82c95700d9ff2efea2507bd84223c4b0, ASSIGN; state=OFFLINE, location=be0458f36726,43025,1733152773677; forceNewPlan=false, retain=false 2024-12-02T15:19:36,953 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:45623 is added to blk_1073741840_1016 (size=1663647) 2024-12-02T15:19:36,953 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073741840_1016 (size=1663647) 2024-12-02T15:19:36,954 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073741840_1016 (size=1663647) 2024-12-02T15:19:37,016 INFO [be0458f36726:34441 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 2024-12-02T15:19:37,017 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=82c95700d9ff2efea2507bd84223c4b0, regionState=OPENING, regionLocation=be0458f36726,43025,1733152773677 2024-12-02T15:19:37,024 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:acl, region=82c95700d9ff2efea2507bd84223c4b0, ASSIGN because future has completed 2024-12-02T15:19:37,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-02T15:19:37,043 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 82c95700d9ff2efea2507bd84223c4b0, server=be0458f36726,43025,1733152773677}] 2024-12-02T15:19:37,215 DEBUG [RSProcedureDispatcher-pool-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-02T15:19:37,238 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52099, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-02T15:19:37,247 INFO [RS_OPEN_PRIORITY_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(132): Open hbase:acl,,1733152776657.82c95700d9ff2efea2507bd84223c4b0. 2024-12-02T15:19:37,247 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 82c95700d9ff2efea2507bd84223c4b0, NAME => 'hbase:acl,,1733152776657.82c95700d9ff2efea2507bd84223c4b0.', STARTKEY => '', ENDKEY => ''} 2024-12-02T15:19:37,248 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:acl,,1733152776657.82c95700d9ff2efea2507bd84223c4b0. service=AccessControlService 2024-12-02T15:19:37,248 INFO [RS_OPEN_PRIORITY_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-02T15:19:37,249 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table acl 82c95700d9ff2efea2507bd84223c4b0 2024-12-02T15:19:37,249 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(898): Instantiated hbase:acl,,1733152776657.82c95700d9ff2efea2507bd84223c4b0.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T15:19:37,249 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 82c95700d9ff2efea2507bd84223c4b0 2024-12-02T15:19:37,249 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 82c95700d9ff2efea2507bd84223c4b0 2024-12-02T15:19:37,253 INFO [StoreOpener-82c95700d9ff2efea2507bd84223c4b0-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family l of region 82c95700d9ff2efea2507bd84223c4b0 2024-12-02T15:19:37,256 INFO [StoreOpener-82c95700d9ff2efea2507bd84223c4b0-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 82c95700d9ff2efea2507bd84223c4b0 columnFamilyName l 2024-12-02T15:19:37,256 DEBUG [StoreOpener-82c95700d9ff2efea2507bd84223c4b0-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T15:19:37,259 INFO [StoreOpener-82c95700d9ff2efea2507bd84223c4b0-1 {}] regionserver.HStore(327): Store=82c95700d9ff2efea2507bd84223c4b0/l, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T15:19:37,259 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 82c95700d9ff2efea2507bd84223c4b0 2024-12-02T15:19:37,261 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/hbase/acl/82c95700d9ff2efea2507bd84223c4b0 2024-12-02T15:19:37,263 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/hbase/acl/82c95700d9ff2efea2507bd84223c4b0 2024-12-02T15:19:37,264 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 82c95700d9ff2efea2507bd84223c4b0 2024-12-02T15:19:37,265 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 82c95700d9ff2efea2507bd84223c4b0 2024-12-02T15:19:37,271 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 82c95700d9ff2efea2507bd84223c4b0 2024-12-02T15:19:37,284 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/hbase/acl/82c95700d9ff2efea2507bd84223c4b0/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-02T15:19:37,285 INFO [RS_OPEN_PRIORITY_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1114): Opened 82c95700d9ff2efea2507bd84223c4b0; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=70155451, jitterRate=0.04539768397808075}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-02T15:19:37,286 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 82c95700d9ff2efea2507bd84223c4b0 2024-12-02T15:19:37,288 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 82c95700d9ff2efea2507bd84223c4b0: Running coprocessor pre-open hook at 1733152777249Writing region info on filesystem at 1733152777249Initializing all the Stores at 1733152777252 (+3 ms)Instantiating store for column family {NAME => 'l', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733152777252Cleaning up temporary data from old regions at 1733152777265 (+13 ms)Running coprocessor post-open hooks at 1733152777286 (+21 ms)Region opened successfully at 1733152777288 (+2 ms) 2024-12-02T15:19:37,291 INFO [RS_OPEN_PRIORITY_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:acl,,1733152776657.82c95700d9ff2efea2507bd84223c4b0., pid=6, masterSystemTime=1733152777215 2024-12-02T15:19:37,299 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:acl,,1733152776657.82c95700d9ff2efea2507bd84223c4b0. 2024-12-02T15:19:37,299 INFO [RS_OPEN_PRIORITY_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(153): Opened hbase:acl,,1733152776657.82c95700d9ff2efea2507bd84223c4b0. 
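The hbase:acl region opened above backs the AccessController coprocessor, and grants are persisted as rows in that table (later in this log: "Writing permission with rowKey ... jenkins: RWXCA"). As a hedged illustration only, with hypothetical table and user names, a grant issued through the public client API looks roughly like this:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.security.access.AccessControlClient;
    import org.apache.hadoop.hbase.security.access.Permission;

    public class GrantExample {
      // AccessControlClient.grant declares "throws Throwable", hence the signature here.
      public static void main(String[] args) throws Throwable {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create())) {
          // Grant read/write on a hypothetical table to a hypothetical user; the
          // AccessController stores the grant as a cell in hbase:acl.
          AccessControlClient.grant(conn, TableName.valueOf("example-table"), "example-user",
              null, null, Permission.Action.READ, Permission.Action.WRITE);
        }
      }
    }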
2024-12-02T15:19:37,299 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=82c95700d9ff2efea2507bd84223c4b0, regionState=OPEN, openSeqNum=2, regionLocation=be0458f36726,43025,1733152773677 2024-12-02T15:19:37,305 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 82c95700d9ff2efea2507bd84223c4b0, server=be0458f36726,43025,1733152773677 because future has completed 2024-12-02T15:19:37,321 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-12-02T15:19:37,321 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 82c95700d9ff2efea2507bd84223c4b0, server=be0458f36726,43025,1733152773677 in 270 msec 2024-12-02T15:19:37,335 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-12-02T15:19:37,335 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:acl, region=82c95700d9ff2efea2507bd84223c4b0, ASSIGN in 476 msec 2024-12-02T15:19:37,337 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=hbase:acl execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-02T15:19:37,338 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"hbase:acl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733152777338"}]},"ts":"1733152777338"} 2024-12-02T15:19:37,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-02T15:19:37,348 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=hbase:acl, state=ENABLED in hbase:meta 2024-12-02T15:19:37,351 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=hbase:acl execute state=CREATE_TABLE_POST_OPERATION 2024-12-02T15:19:37,356 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=hbase:acl in 680 msec 2024-12-02T15:19:37,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-02T15:19:37,868 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: hbase:acl completed 2024-12-02T15:19:37,875 DEBUG [master/be0458f36726:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-02T15:19:37,876 INFO [master/be0458f36726:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-02T15:19:37,877 INFO [master/be0458f36726:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=be0458f36726,34441,1733152772730-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-02T15:19:38,564 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-02T15:19:38,669 WARN [Thread-383 {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-02T15:19:38,901 INFO [Thread-383 {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-02T15:19:38,925 WARN [Time-limited test {}] servlet.GuiceFilter(102): Multiple Servlet injectors detected. This is a warning indicating that you have more than one GuiceFilter running in your web application. If this is deliberate, you may safely ignore this message. If this is NOT deliberate however, your application may not work as expected. 2024-12-02T15:19:38,926 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-02T15:19:38,934 INFO [Thread-383 {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-02T15:19:38,934 INFO [Thread-383 {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-02T15:19:38,934 INFO [Thread-383 {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-02T15:19:38,939 INFO [Thread-383 {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@37232c97{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/44e720fa-d04f-d823-6564-55fab94a4e60/hadoop.log.dir/,AVAILABLE} 2024-12-02T15:19:38,940 INFO [Thread-383 {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@599e50d1{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,AVAILABLE} 2024-12-02T15:19:38,941 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-02T15:19:38,941 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-02T15:19:38,941 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-02T15:19:38,949 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-02T15:19:38,958 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5edc3560{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/44e720fa-d04f-d823-6564-55fab94a4e60/hadoop.log.dir/,AVAILABLE} 2024-12-02T15:19:38,959 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@40318239{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,AVAILABLE} 2024-12-02T15:19:39,143 INFO [Thread-383 {}] container.GuiceComponentProviderFactory(113): Registering org.apache.hadoop.yarn.server.resourcemanager.webapp.JAXBContextResolver as a provider class 2024-12-02T15:19:39,143 INFO [Thread-383 {}] container.GuiceComponentProviderFactory(116): Registering org.apache.hadoop.yarn.server.resourcemanager.webapp.RMWebServices as a root resource class 2024-12-02T15:19:39,143 INFO [Thread-383 {}] container.GuiceComponentProviderFactory(113): Registering org.apache.hadoop.yarn.webapp.GenericExceptionHandler as a provider class 2024-12-02T15:19:39,145 INFO [Thread-383 {}] application.WebApplicationImpl(815): Initiating Jersey application, version 'Jersey: 1.19.4 05/24/2017 03:20 PM' 2024-12-02T15:19:39,212 INFO [Thread-383 {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.server.resourcemanager.webapp.JAXBContextResolver to GuiceManagedComponentProvider with the scope "Singleton" 2024-12-02T15:19:39,601 INFO [Thread-383 {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.webapp.GenericExceptionHandler to GuiceManagedComponentProvider with the scope "Singleton" 2024-12-02T15:19:39,962 INFO [Thread-383 {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.server.resourcemanager.webapp.RMWebServices to GuiceManagedComponentProvider with the scope "Singleton" 2024-12-02T15:19:39,988 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@c3e40d6{cluster,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/44e720fa-d04f-d823-6564-55fab94a4e60/java.io.tmpdir/jetty-localhost-37865-hadoop-yarn-common-3_4_1_jar-_-any-2639259572190732463/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/cluster} 2024-12-02T15:19:39,989 INFO [Thread-383 {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2edf2e84{jobhistory,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/44e720fa-d04f-d823-6564-55fab94a4e60/java.io.tmpdir/jetty-localhost-44487-hadoop-yarn-common-3_4_1_jar-_-any-7860204155181252928/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/jobhistory} 2024-12-02T15:19:39,989 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@75e1533{HTTP/1.1, (http/1.1)}{localhost:37865} 2024-12-02T15:19:39,989 INFO [Time-limited test {}] server.Server(415): Started @15210ms 2024-12-02T15:19:39,992 INFO [Thread-383 {}] 
server.AbstractConnector(333): Started ServerConnector@1cc97fcb{HTTP/1.1, (http/1.1)}{localhost:44487} 2024-12-02T15:19:39,992 INFO [Thread-383 {}] server.Server(415): Started @15214ms 2024-12-02T15:19:40,266 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073741841_1017 (size=5) 2024-12-02T15:19:40,267 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073741841_1017 (size=5) 2024-12-02T15:19:40,271 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073741841_1017 (size=5) 2024-12-02T15:19:41,008 WARN [Time-limited test {}] tracker.NMLogAggregationStatusTracker(95): Log Aggregation is disabled.So is the LogAggregationStatusTracker. 2024-12-02T15:19:41,014 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-02T15:19:41,057 WARN [Time-limited test {}] servlet.GuiceFilter(102): Multiple Servlet injectors detected. This is a warning indicating that you have more than one GuiceFilter running in your web application. If this is deliberate, you may safely ignore this message. If this is NOT deliberate however, your application may not work as expected. 2024-12-02T15:19:41,058 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-02T15:19:41,091 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-02T15:19:41,091 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-02T15:19:41,091 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-02T15:19:41,095 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-02T15:19:41,099 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6f604dfa{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/44e720fa-d04f-d823-6564-55fab94a4e60/hadoop.log.dir/,AVAILABLE} 2024-12-02T15:19:41,100 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@681c7fea{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,AVAILABLE} 2024-12-02T15:19:41,174 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(116): Registering org.apache.hadoop.yarn.server.nodemanager.webapp.NMWebServices as a root resource class 2024-12-02T15:19:41,175 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(113): Registering org.apache.hadoop.yarn.webapp.GenericExceptionHandler as a provider class 2024-12-02T15:19:41,175 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(113): Registering org.apache.hadoop.yarn.server.nodemanager.webapp.JAXBContextResolver as a provider class 2024-12-02T15:19:41,175 INFO [Time-limited test {}] application.WebApplicationImpl(815): Initiating Jersey application, version 'Jersey: 1.19.4 05/24/2017 03:20 PM' 2024-12-02T15:19:41,197 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.server.nodemanager.webapp.JAXBContextResolver to GuiceManagedComponentProvider with the scope "Singleton" 2024-12-02T15:19:41,221 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.webapp.GenericExceptionHandler to GuiceManagedComponentProvider with the scope "Singleton" 2024-12-02T15:19:41,334 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.server.nodemanager.webapp.NMWebServices to GuiceManagedComponentProvider with the scope "Singleton" 2024-12-02T15:19:41,349 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@126061c1{node,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/44e720fa-d04f-d823-6564-55fab94a4e60/java.io.tmpdir/jetty-localhost-36061-hadoop-yarn-common-3_4_1_jar-_-any-5384108660198029808/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/node} 2024-12-02T15:19:41,350 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6598a136{HTTP/1.1, (http/1.1)}{localhost:36061} 2024-12-02T15:19:41,351 INFO [Time-limited test {}] server.Server(415): Started @16572ms 2024-12-02T15:19:41,651 WARN [Time-limited test {}] tracker.NMLogAggregationStatusTracker(95): Log Aggregation is disabled.So is the LogAggregationStatusTracker. 2024-12-02T15:19:41,654 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-02T15:19:41,675 WARN [Time-limited test {}] servlet.GuiceFilter(102): Multiple Servlet injectors detected. 
This is a warning indicating that you have more than one GuiceFilter running in your web application. If this is deliberate, you may safely ignore this message. If this is NOT deliberate however, your application may not work as expected. 2024-12-02T15:19:41,676 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-02T15:19:41,689 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-02T15:19:41,689 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-02T15:19:41,689 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-02T15:19:41,691 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-02T15:19:41,694 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1d3607f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/44e720fa-d04f-d823-6564-55fab94a4e60/hadoop.log.dir/,AVAILABLE} 2024-12-02T15:19:41,695 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5f1729d0{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,AVAILABLE} 2024-12-02T15:19:41,749 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(116): Registering org.apache.hadoop.yarn.server.nodemanager.webapp.NMWebServices as a root resource class 2024-12-02T15:19:41,749 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(113): Registering org.apache.hadoop.yarn.webapp.GenericExceptionHandler as a provider class 2024-12-02T15:19:41,749 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(113): Registering org.apache.hadoop.yarn.server.nodemanager.webapp.JAXBContextResolver as a provider class 2024-12-02T15:19:41,749 INFO [Time-limited test {}] application.WebApplicationImpl(815): Initiating Jersey application, version 'Jersey: 1.19.4 05/24/2017 03:20 PM' 2024-12-02T15:19:41,760 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.server.nodemanager.webapp.JAXBContextResolver to GuiceManagedComponentProvider with the scope "Singleton" 2024-12-02T15:19:41,765 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.webapp.GenericExceptionHandler to GuiceManagedComponentProvider with the scope "Singleton" 2024-12-02T15:19:41,784 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-02T15:19:41,892 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.server.nodemanager.webapp.NMWebServices to GuiceManagedComponentProvider with the scope "Singleton" 2024-12-02T15:19:41,898 INFO [Time-limited test {}] handler.ContextHandler(921): Started 
o.e.j.w.WebAppContext@59449b25{node,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/44e720fa-d04f-d823-6564-55fab94a4e60/java.io.tmpdir/jetty-localhost-43235-hadoop-yarn-common-3_4_1_jar-_-any-8674736324231955633/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/node} 2024-12-02T15:19:41,900 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3fa648e6{HTTP/1.1, (http/1.1)}{localhost:43235} 2024-12-02T15:19:41,900 INFO [Time-limited test {}] server.Server(415): Started @17121ms 2024-12-02T15:19:41,926 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-02T15:19:41,928 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:acl' 2024-12-02T15:19:41,969 INFO [Time-limited test {}] hbase.HBaseTestingUtil(2341): Mini mapreduce cluster started 2024-12-02T15:19:41,971 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [30,000] milli-secs(wait.for.ratio=[1]) 2024-12-02T15:19:42,016 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestSecureExportSnapshot#testExportFileSystemStateWithSplitRegion Thread=725, OpenFileDescriptor=782, MaxFileDescriptor=1048576, SystemLoadAverage=231, ProcessCount=11, AvailableMemoryMB=3680 2024-12-02T15:19:42,041 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=725 is superior to 500 2024-12-02T15:19:42,050 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-02T15:19:42,057 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.AsyncConnectionImpl(321): The fetched master address is be0458f36726,34441,1733152772730 2024-12-02T15:19:42,058 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@3bc8b85 2024-12-02T15:19:42,058 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-02T15:19:42,064 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46230, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-02T15:19:42,067 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testExportFileSystemStateWithSplitRegion', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-02T15:19:42,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion 2024-12-02T15:19:42,075 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=7, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure 
table=testtb-testExportFileSystemStateWithSplitRegion execute state=CREATE_TABLE_PRE_OPERATION 2024-12-02T15:19:42,077 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportFileSystemStateWithSplitRegion" procId is: 7 2024-12-02T15:19:42,077 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T15:19:42,080 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=7, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-02T15:19:42,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-02T15:19:42,121 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073741842_1018 (size=422) 2024-12-02T15:19:42,122 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073741842_1018 (size=422) 2024-12-02T15:19:42,124 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073741842_1018 (size=422) 2024-12-02T15:19:42,134 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 00df70f91cea80c6193fbc6c5ad9d3b9, NAME => 'testtb-testExportFileSystemStateWithSplitRegion,,1733152782066.00df70f91cea80c6193fbc6c5ad9d3b9.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportFileSystemStateWithSplitRegion', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657 2024-12-02T15:19:42,135 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => fb3d55cc4b4cfd19ef91f04ca0bd2813, NAME => 'testtb-testExportFileSystemStateWithSplitRegion,1,1733152782066.fb3d55cc4b4cfd19ef91f04ca0bd2813.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportFileSystemStateWithSplitRegion', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657 2024-12-02T15:19:42,169 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073741844_1020 (size=83) 
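The master log above records a create-table request for testtb-testExportFileSystemStateWithSplitRegion with a single 'cf' family, and the two RegionOpenAndInit workers then initialize regions with boundaries ('', '1') and ('1', ''). As an illustration only (names are examples, not taken from the test code), a pre-split table of that shape is typically created through the Admin API along these lines:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreatePreSplitTable {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // Table with one column family 'cf', as in the logged create request.
          TableDescriptor desc = TableDescriptorBuilder
              .newBuilder(TableName.valueOf("example-split-table"))
              .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"))
              .build();
          // A single split key of "1" yields two regions, ('', '1') and ('1', ''),
          // matching the region boundaries seen in the log above.
          byte[][] splitKeys = { Bytes.toBytes("1") };
          admin.createTable(desc, splitKeys);
        }
      }
    }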
2024-12-02T15:19:42,170 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073741844_1020 (size=83) 2024-12-02T15:19:42,170 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073741844_1020 (size=83) 2024-12-02T15:19:42,172 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073741843_1019 (size=83) 2024-12-02T15:19:42,174 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073741843_1019 (size=83) 2024-12-02T15:19:42,175 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073741843_1019 (size=83) 2024-12-02T15:19:42,176 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithSplitRegion,,1733152782066.00df70f91cea80c6193fbc6c5ad9d3b9.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T15:19:42,176 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1722): Closing 00df70f91cea80c6193fbc6c5ad9d3b9, disabling compactions & flushes 2024-12-02T15:19:42,177 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithSplitRegion,,1733152782066.00df70f91cea80c6193fbc6c5ad9d3b9. 2024-12-02T15:19:42,177 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithSplitRegion,,1733152782066.00df70f91cea80c6193fbc6c5ad9d3b9. 2024-12-02T15:19:42,177 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithSplitRegion,,1733152782066.00df70f91cea80c6193fbc6c5ad9d3b9. after waiting 0 ms 2024-12-02T15:19:42,177 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithSplitRegion,,1733152782066.00df70f91cea80c6193fbc6c5ad9d3b9. 2024-12-02T15:19:42,177 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithSplitRegion,,1733152782066.00df70f91cea80c6193fbc6c5ad9d3b9. 
2024-12-02T15:19:42,177 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1676): Region close journal for 00df70f91cea80c6193fbc6c5ad9d3b9: Waiting for close lock at 1733152782176Disabling compacts and flushes for region at 1733152782176Disabling writes for close at 1733152782177 (+1 ms)Writing region close event to WAL at 1733152782177Closed at 1733152782177 2024-12-02T15:19:42,178 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithSplitRegion,1,1733152782066.fb3d55cc4b4cfd19ef91f04ca0bd2813.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T15:19:42,178 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-1 {}] regionserver.HRegion(1722): Closing fb3d55cc4b4cfd19ef91f04ca0bd2813, disabling compactions & flushes 2024-12-02T15:19:42,178 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithSplitRegion,1,1733152782066.fb3d55cc4b4cfd19ef91f04ca0bd2813. 2024-12-02T15:19:42,178 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithSplitRegion,1,1733152782066.fb3d55cc4b4cfd19ef91f04ca0bd2813. 2024-12-02T15:19:42,178 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithSplitRegion,1,1733152782066.fb3d55cc4b4cfd19ef91f04ca0bd2813. after waiting 0 ms 2024-12-02T15:19:42,178 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithSplitRegion,1,1733152782066.fb3d55cc4b4cfd19ef91f04ca0bd2813. 2024-12-02T15:19:42,179 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithSplitRegion,1,1733152782066.fb3d55cc4b4cfd19ef91f04ca0bd2813. 
2024-12-02T15:19:42,179 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-1 {}] regionserver.HRegion(1676): Region close journal for fb3d55cc4b4cfd19ef91f04ca0bd2813: Waiting for close lock at 1733152782178Disabling compacts and flushes for region at 1733152782178Disabling writes for close at 1733152782178Writing region close event to WAL at 1733152782178Closed at 1733152782178 2024-12-02T15:19:42,181 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=7, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion execute state=CREATE_TABLE_ADD_TO_META 2024-12-02T15:19:42,182 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithSplitRegion,,1733152782066.00df70f91cea80c6193fbc6c5ad9d3b9.","families":{"info":[{"qualifier":"regioninfo","vlen":82,"tag":[],"timestamp":"1733152782181"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733152782181"}]},"ts":"1733152782181"} 2024-12-02T15:19:42,182 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithSplitRegion,1,1733152782066.fb3d55cc4b4cfd19ef91f04ca0bd2813.","families":{"info":[{"qualifier":"regioninfo","vlen":82,"tag":[],"timestamp":"1733152782181"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733152782181"}]},"ts":"1733152782181"} 2024-12-02T15:19:42,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-02T15:19:42,226 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-12-02T15:19:42,229 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=7, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-02T15:19:42,229 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSplitRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733152782229"}]},"ts":"1733152782229"} 2024-12-02T15:19:42,233 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithSplitRegion, state=ENABLING in hbase:meta 2024-12-02T15:19:42,233 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(204): Hosts are {be0458f36726=0} racks are {/default-rack=0} 2024-12-02T15:19:42,236 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-02T15:19:42,236 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-02T15:19:42,236 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-02T15:19:42,236 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-02T15:19:42,236 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-02T15:19:42,236 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-02T15:19:42,236 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-02T15:19:42,237 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-02T15:19:42,237 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-02T15:19:42,237 DEBUG [PEWorker-2 {}] 
balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-02T15:19:42,237 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=00df70f91cea80c6193fbc6c5ad9d3b9, ASSIGN}, {pid=9, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=fb3d55cc4b4cfd19ef91f04ca0bd2813, ASSIGN}] 2024-12-02T15:19:42,240 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=9, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=fb3d55cc4b4cfd19ef91f04ca0bd2813, ASSIGN 2024-12-02T15:19:42,242 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=00df70f91cea80c6193fbc6c5ad9d3b9, ASSIGN 2024-12-02T15:19:42,245 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=9, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=fb3d55cc4b4cfd19ef91f04ca0bd2813, ASSIGN; state=OFFLINE, location=be0458f36726,45405,1733152773787; forceNewPlan=false, retain=false 2024-12-02T15:19:42,245 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=00df70f91cea80c6193fbc6c5ad9d3b9, ASSIGN; state=OFFLINE, location=be0458f36726,35155,1733152773848; forceNewPlan=false, retain=false 2024-12-02T15:19:42,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-02T15:19:42,396 INFO [be0458f36726:34441 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
2024-12-02T15:19:42,396 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=8 updating hbase:meta row=00df70f91cea80c6193fbc6c5ad9d3b9, regionState=OPENING, regionLocation=be0458f36726,35155,1733152773848 2024-12-02T15:19:42,396 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=9 updating hbase:meta row=fb3d55cc4b4cfd19ef91f04ca0bd2813, regionState=OPENING, regionLocation=be0458f36726,45405,1733152773787 2024-12-02T15:19:42,401 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=9, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=fb3d55cc4b4cfd19ef91f04ca0bd2813, ASSIGN because future has completed 2024-12-02T15:19:42,402 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE, hasLock=false; OpenRegionProcedure fb3d55cc4b4cfd19ef91f04ca0bd2813, server=be0458f36726,45405,1733152773787}] 2024-12-02T15:19:42,403 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=00df70f91cea80c6193fbc6c5ad9d3b9, ASSIGN because future has completed 2024-12-02T15:19:42,404 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=11, ppid=8, state=RUNNABLE, hasLock=false; OpenRegionProcedure 00df70f91cea80c6193fbc6c5ad9d3b9, server=be0458f36726,35155,1733152773848}] 2024-12-02T15:19:42,568 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-02T15:19:42,577 INFO [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithSplitRegion,1,1733152782066.fb3d55cc4b4cfd19ef91f04ca0bd2813. 2024-12-02T15:19:42,577 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(7752): Opening region: {ENCODED => fb3d55cc4b4cfd19ef91f04ca0bd2813, NAME => 'testtb-testExportFileSystemStateWithSplitRegion,1,1733152782066.fb3d55cc4b4cfd19ef91f04ca0bd2813.', STARTKEY => '1', ENDKEY => ''} 2024-12-02T15:19:42,577 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithSplitRegion,1,1733152782066.fb3d55cc4b4cfd19ef91f04ca0bd2813. service=AccessControlService 2024-12-02T15:19:42,578 INFO [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-02T15:19:42,578 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithSplitRegion fb3d55cc4b4cfd19ef91f04ca0bd2813 2024-12-02T15:19:42,578 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithSplitRegion,1,1733152782066.fb3d55cc4b4cfd19ef91f04ca0bd2813.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T15:19:42,578 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(7794): checking encryption for fb3d55cc4b4cfd19ef91f04ca0bd2813 2024-12-02T15:19:42,578 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(7797): checking classloading for fb3d55cc4b4cfd19ef91f04ca0bd2813 2024-12-02T15:19:42,581 INFO [StoreOpener-fb3d55cc4b4cfd19ef91f04ca0bd2813-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region fb3d55cc4b4cfd19ef91f04ca0bd2813 2024-12-02T15:19:42,584 INFO [StoreOpener-fb3d55cc4b4cfd19ef91f04ca0bd2813-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region fb3d55cc4b4cfd19ef91f04ca0bd2813 columnFamilyName cf 2024-12-02T15:19:42,584 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34043, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-02T15:19:42,584 DEBUG [StoreOpener-fb3d55cc4b4cfd19ef91f04ca0bd2813-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T15:19:42,585 INFO [StoreOpener-fb3d55cc4b4cfd19ef91f04ca0bd2813-1 {}] regionserver.HStore(327): Store=fb3d55cc4b4cfd19ef91f04ca0bd2813/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T15:19:42,586 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(1038): replaying wal for fb3d55cc4b4cfd19ef91f04ca0bd2813 2024-12-02T15:19:42,587 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportFileSystemStateWithSplitRegion/fb3d55cc4b4cfd19ef91f04ca0bd2813 
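The CompactionConfiguration line above echoes the effective compaction settings for the new store (minFilesToCompact:3, maxFilesToCompact:10, ratio 1.2, major period 604800000 ms). Shown purely as an illustration, those particular values correspond to standard HBase configuration keys and could be set programmatically like this (or equivalently in hbase-site.xml):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionTuning {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Minimum and maximum number of store files considered per minor compaction.
        conf.setInt("hbase.hstore.compaction.min", 3);
        conf.setInt("hbase.hstore.compaction.max", 10);
        // Selection ratio used by the exploring compaction policy.
        conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);
        // Major compaction period: 604800000 ms = 7 days.
        conf.setLong("hbase.hregion.majorcompaction", 604800000L);
        System.out.println("compaction.min = " + conf.getInt("hbase.hstore.compaction.min", -1));
      }
    }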
2024-12-02T15:19:42,588 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportFileSystemStateWithSplitRegion/fb3d55cc4b4cfd19ef91f04ca0bd2813 2024-12-02T15:19:42,589 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(1048): stopping wal replay for fb3d55cc4b4cfd19ef91f04ca0bd2813 2024-12-02T15:19:42,589 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(1060): Cleaning up temporary data for fb3d55cc4b4cfd19ef91f04ca0bd2813 2024-12-02T15:19:42,589 INFO [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithSplitRegion,,1733152782066.00df70f91cea80c6193fbc6c5ad9d3b9. 2024-12-02T15:19:42,589 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7752): Opening region: {ENCODED => 00df70f91cea80c6193fbc6c5ad9d3b9, NAME => 'testtb-testExportFileSystemStateWithSplitRegion,,1733152782066.00df70f91cea80c6193fbc6c5ad9d3b9.', STARTKEY => '', ENDKEY => '1'} 2024-12-02T15:19:42,590 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithSplitRegion,,1733152782066.00df70f91cea80c6193fbc6c5ad9d3b9. service=AccessControlService 2024-12-02T15:19:42,590 INFO [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-02T15:19:42,590 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithSplitRegion 00df70f91cea80c6193fbc6c5ad9d3b9 2024-12-02T15:19:42,591 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithSplitRegion,,1733152782066.00df70f91cea80c6193fbc6c5ad9d3b9.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T15:19:42,591 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7794): checking encryption for 00df70f91cea80c6193fbc6c5ad9d3b9 2024-12-02T15:19:42,591 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7797): checking classloading for 00df70f91cea80c6193fbc6c5ad9d3b9 2024-12-02T15:19:42,593 INFO [StoreOpener-00df70f91cea80c6193fbc6c5ad9d3b9-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 00df70f91cea80c6193fbc6c5ad9d3b9 2024-12-02T15:19:42,596 INFO [StoreOpener-00df70f91cea80c6193fbc6c5ad9d3b9-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 00df70f91cea80c6193fbc6c5ad9d3b9 columnFamilyName cf 2024-12-02T15:19:42,596 DEBUG [StoreOpener-00df70f91cea80c6193fbc6c5ad9d3b9-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T15:19:42,597 INFO [StoreOpener-00df70f91cea80c6193fbc6c5ad9d3b9-1 {}] regionserver.HStore(327): Store=00df70f91cea80c6193fbc6c5ad9d3b9/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T15:19:42,597 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1038): replaying wal for 00df70f91cea80c6193fbc6c5ad9d3b9 2024-12-02T15:19:42,599 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportFileSystemStateWithSplitRegion/00df70f91cea80c6193fbc6c5ad9d3b9 2024-12-02T15:19:42,602 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportFileSystemStateWithSplitRegion/00df70f91cea80c6193fbc6c5ad9d3b9 2024-12-02T15:19:42,603 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1048): stopping wal replay for 00df70f91cea80c6193fbc6c5ad9d3b9 2024-12-02T15:19:42,603 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1060): Cleaning up temporary data for 00df70f91cea80c6193fbc6c5ad9d3b9 2024-12-02T15:19:42,604 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(1093): writing seq id for fb3d55cc4b4cfd19ef91f04ca0bd2813 2024-12-02T15:19:42,611 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportFileSystemStateWithSplitRegion/fb3d55cc4b4cfd19ef91f04ca0bd2813/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-02T15:19:42,611 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1093): writing seq id for 00df70f91cea80c6193fbc6c5ad9d3b9 2024-12-02T15:19:42,612 INFO [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(1114): Opened fb3d55cc4b4cfd19ef91f04ca0bd2813; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=59317393, jitterRate=-0.11610196530818939}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-02T15:19:42,612 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(1122): Running coprocessor post-open hooks for fb3d55cc4b4cfd19ef91f04ca0bd2813 2024-12-02T15:19:42,613 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(1006): Region open journal for fb3d55cc4b4cfd19ef91f04ca0bd2813: Running coprocessor pre-open hook at 1733152782578Writing region info on filesystem at 1733152782578Initializing all the Stores at 1733152782580 (+2 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733152782580Cleaning up temporary data from old regions at 1733152782589 (+9 ms)Running coprocessor post-open hooks at 1733152782612 (+23 ms)Region opened successfully at 1733152782613 (+1 ms) 2024-12-02T15:19:42,624 INFO [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemStateWithSplitRegion,1,1733152782066.fb3d55cc4b4cfd19ef91f04ca0bd2813., pid=10, masterSystemTime=1733152782556 2024-12-02T15:19:42,625 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] wal.WALSplitUtil(410): Wrote 
file=hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportFileSystemStateWithSplitRegion/00df70f91cea80c6193fbc6c5ad9d3b9/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-02T15:19:42,631 INFO [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1114): Opened 00df70f91cea80c6193fbc6c5ad9d3b9; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=61996580, jitterRate=-0.07617896795272827}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-02T15:19:42,631 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 00df70f91cea80c6193fbc6c5ad9d3b9 2024-12-02T15:19:42,631 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1006): Region open journal for 00df70f91cea80c6193fbc6c5ad9d3b9: Running coprocessor pre-open hook at 1733152782591Writing region info on filesystem at 1733152782591Initializing all the Stores at 1733152782593 (+2 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733152782593Cleaning up temporary data from old regions at 1733152782603 (+10 ms)Running coprocessor post-open hooks at 1733152782631 (+28 ms)Region opened successfully at 1733152782631 2024-12-02T15:19:42,633 INFO [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemStateWithSplitRegion,,1733152782066.00df70f91cea80c6193fbc6c5ad9d3b9., pid=11, masterSystemTime=1733152782567 2024-12-02T15:19:42,633 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithSplitRegion,1,1733152782066.fb3d55cc4b4cfd19ef91f04ca0bd2813. 2024-12-02T15:19:42,634 INFO [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithSplitRegion,1,1733152782066.fb3d55cc4b4cfd19ef91f04ca0bd2813. 2024-12-02T15:19:42,635 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=9 updating hbase:meta row=fb3d55cc4b4cfd19ef91f04ca0bd2813, regionState=OPEN, openSeqNum=2, regionLocation=be0458f36726,45405,1733152773787 2024-12-02T15:19:42,638 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithSplitRegion,,1733152782066.00df70f91cea80c6193fbc6c5ad9d3b9. 2024-12-02T15:19:42,638 INFO [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithSplitRegion,,1733152782066.00df70f91cea80c6193fbc6c5ad9d3b9. 
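With both regions of the test table now open, the surrounding test (TestSecureExportSnapshot#testExportFileSystemStateWithSplitRegion) goes on to snapshot the table and export its file-system state. As a hedged sketch only, not the test's actual code, the client-side shape of that workflow is roughly:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class SnapshotAndExport {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // Take a snapshot of a hypothetical table.
          admin.snapshot("example-snapshot", TableName.valueOf("example-split-table"));
          // Exporting the snapshot to another filesystem is normally driven by the
          // ExportSnapshot MapReduce tool, e.g.:
          //   hbase org.apache.hadoop.hbase.snapshot.ExportSnapshot \
          //     -snapshot example-snapshot -copy-to hdfs://backup-cluster:8020/hbase
        }
      }
    }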
2024-12-02T15:19:42,639 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=8 updating hbase:meta row=00df70f91cea80c6193fbc6c5ad9d3b9, regionState=OPEN, openSeqNum=2, regionLocation=be0458f36726,35155,1733152773848 2024-12-02T15:19:42,642 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=10, ppid=9, state=RUNNABLE, hasLock=false; OpenRegionProcedure fb3d55cc4b4cfd19ef91f04ca0bd2813, server=be0458f36726,45405,1733152773787 because future has completed 2024-12-02T15:19:42,643 WARN [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34441 {}] assignment.AssignmentManager(1543): Unable to acquire lock for regionNode state=OPEN, location=be0458f36726,45405,1733152773787, table=testtb-testExportFileSystemStateWithSplitRegion, region=fb3d55cc4b4cfd19ef91f04ca0bd2813. It is likely that another thread is currently holding the lock. To avoid deadlock, skip execution for now. 2024-12-02T15:19:42,649 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=11, ppid=8, state=RUNNABLE, hasLock=false; OpenRegionProcedure 00df70f91cea80c6193fbc6c5ad9d3b9, server=be0458f36726,35155,1733152773848 because future has completed 2024-12-02T15:19:42,654 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=10, resume processing ppid=9 2024-12-02T15:19:42,654 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, ppid=9, state=SUCCESS, hasLock=false; OpenRegionProcedure fb3d55cc4b4cfd19ef91f04ca0bd2813, server=be0458f36726,45405,1733152773787 in 246 msec 2024-12-02T15:19:42,658 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=11, resume processing ppid=8 2024-12-02T15:19:42,658 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, ppid=8, state=SUCCESS, hasLock=false; OpenRegionProcedure 00df70f91cea80c6193fbc6c5ad9d3b9, server=be0458f36726,35155,1733152773848 in 248 msec 2024-12-02T15:19:42,660 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=fb3d55cc4b4cfd19ef91f04ca0bd2813, ASSIGN in 417 msec 2024-12-02T15:19:42,663 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-12-02T15:19:42,663 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=00df70f91cea80c6193fbc6c5ad9d3b9, ASSIGN in 421 msec 2024-12-02T15:19:42,665 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=7, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-02T15:19:42,665 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSplitRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733152782665"}]},"ts":"1733152782665"} 2024-12-02T15:19:42,669 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithSplitRegion, state=ENABLED in hbase:meta 2024-12-02T15:19:42,671 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=7, 
state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion execute state=CREATE_TABLE_POST_OPERATION 2024-12-02T15:19:42,675 DEBUG [PEWorker-2 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportFileSystemStateWithSplitRegion jenkins: RWXCA 2024-12-02T15:19:42,684 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemStateWithSplitRegion', locateType=CURRENT is [region=hbase:acl,,1733152776657.82c95700d9ff2efea2507bd84223c4b0., hostname=be0458f36726,43025,1733152773677, seqNum=2] 2024-12-02T15:19:42,685 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T15:19:42,688 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52157, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T15:19:42,692 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43025 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-02T15:19:42,692 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43025 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=be0458f36726,45405,1733152773787, seqNum=-1] 2024-12-02T15:19:42,692 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43025 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T15:19:42,694 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52543, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=ClientService 2024-12-02T15:19:42,697 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemStateWithSplitRegion', locateType=CURRENT is [region=hbase:acl,,1733152776657.82c95700d9ff2efea2507bd84223c4b0., hostname=be0458f36726,43025,1733152773677, seqNum=2] 2024-12-02T15:19:42,697 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T15:19:42,699 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36719, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=ClientService 2024-12-02T15:19:42,701 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43025 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithSplitRegion], kv [jenkins: RWXCA] 2024-12-02T15:19:42,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-02T15:19:42,778 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35155-0x10197ce54a90003, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/acl 2024-12-02T15:19:42,778 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34441-0x10197ce54a90000, quorum=127.0.0.1:55609, baseZNode=/hbase Received 
ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/acl 2024-12-02T15:19:42,778 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43025-0x10197ce54a90001, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/acl 2024-12-02T15:19:42,778 DEBUG [pool-69-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45405-0x10197ce54a90002, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/acl 2024-12-02T15:19:42,778 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34441-0x10197ce54a90000, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T15:19:42,778 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35155-0x10197ce54a90003, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T15:19:42,778 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43025-0x10197ce54a90001, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T15:19:42,778 DEBUG [pool-69-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45405-0x10197ce54a90002, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T15:19:42,795 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-12-02T15:19:42,795 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-12-02T15:19:42,795 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-12-02T15:19:42,795 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-12-02T15:19:42,799 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion in 728 msec 2024-12-02T15:19:43,094 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.security.access.AccessController 2024-12-02T15:19:43,095 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.security.access.AccessController Metrics about HBase 
RegionObservers 2024-12-02T15:19:43,095 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-12-02T15:19:43,096 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-12-02T15:19:43,098 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.RegionServer.CP_org.apache.hadoop.hbase.security.access.AccessController 2024-12-02T15:19:43,098 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.RegionServer.CP_org.apache.hadoop.hbase.security.access.AccessController Metrics about HBase RegionServerObservers 2024-12-02T15:19:43,106 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-12-02T15:19:43,106 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-12-02T15:19:43,108 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithSplitRegion 2024-12-02T15:19:43,108 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithSplitRegion Metrics about Tables on a single HBase RegionServer 2024-12-02T15:19:43,110 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_acl 2024-12-02T15:19:43,110 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_acl Metrics about Tables on a single HBase RegionServer 2024-12-02T15:19:43,113 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.security.access.AccessController 2024-12-02T15:19:43,114 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.security.access.AccessController Metrics about HBase MasterObservers 2024-12-02T15:19:43,114 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.security.access.SecureTestUtil$MasterSyncObserver 2024-12-02T15:19:43,114 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.security.access.SecureTestUtil$MasterSyncObserver Metrics about HBase MasterObservers 2024-12-02T15:19:43,115 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-02T15:19:43,115 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering 
RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-12-02T15:19:43,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-02T15:19:43,214 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportFileSystemStateWithSplitRegion completed 2024-12-02T15:19:43,214 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table testtb-testExportFileSystemStateWithSplitRegion get assigned. Timeout = 60000ms 2024-12-02T15:19:43,215 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-02T15:19:43,221 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table testtb-testExportFileSystemStateWithSplitRegion assigned to meta. Checking AM states. 2024-12-02T15:19:43,221 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-02T15:19:43,222 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table testtb-testExportFileSystemStateWithSplitRegion assigned. 2024-12-02T15:19:43,225 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSplitRegion,, stopping at row=testtb-testExportFileSystemStateWithSplitRegion ,, for max=2147483647 with caching=100 2024-12-02T15:19:43,239 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } 2024-12-02T15:19:43,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733152783239 (current time:1733152783239). 
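The MasterRpcServices(1763) line above is the first snapshot request of the test ({ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 }). Below is a hedged sketch of the client side of such a request, using the synchronous Admin.snapshot overload; the connection setup and class name are assumed, not taken from this log. The repeated "Checking to see if procedure is done pid=12" lines further down appear to be the master answering the client's completion polls for exactly this kind of call.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class TakeEmptySnapshot {
      public static void main(String[] args) throws Exception {
        TableName table = TableName.valueOf("testtb-testExportFileSystemStateWithSplitRegion");
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // synchronous call: returns once the master reports the snapshot as finished
          admin.snapshot("emptySnaptb0-testExportFileSystemStateWithSplitRegion", table);
        }
      }
    }
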
2024-12-02T15:19:43,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-02T15:19:43,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testExportFileSystemStateWithSplitRegion VERSION not specified, setting to 2 2024-12-02T15:19:43,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-02T15:19:43,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7d5bf829, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T15:19:43,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] client.ClusterIdFetcher(90): Going to request be0458f36726,34441,-1 for getting cluster id 2024-12-02T15:19:43,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-02T15:19:43,245 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '8e50677f-f1c7-447a-8806-ffbc6b162076' 2024-12-02T15:19:43,246 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-02T15:19:43,246 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "8e50677f-f1c7-447a-8806-ffbc6b162076" 2024-12-02T15:19:43,246 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7a6e0878, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T15:19:43,246 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [be0458f36726,34441,-1] 2024-12-02T15:19:43,246 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-02T15:19:43,247 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T15:19:43,248 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46246, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-02T15:19:43,249 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2dfae4f0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T15:19:43,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-02T15:19:43,252 DEBUG 
[MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=be0458f36726,45405,1733152773787, seqNum=-1] 2024-12-02T15:19:43,253 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T15:19:43,255 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33692, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T15:19:43,257 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441. 2024-12-02T15:19:43,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-02T15:19:43,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T15:19:43,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T15:19:43,267 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-12-02T15:19:43,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3c8dce8c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T15:19:43,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] client.ClusterIdFetcher(90): Going to request be0458f36726,34441,-1 for getting cluster id 2024-12-02T15:19:43,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-02T15:19:43,271 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '8e50677f-f1c7-447a-8806-ffbc6b162076' 2024-12-02T15:19:43,272 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-02T15:19:43,272 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "8e50677f-f1c7-447a-8806-ffbc6b162076" 2024-12-02T15:19:43,272 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@515714d2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T15:19:43,272 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [be0458f36726,34441,-1] 2024-12-02T15:19:43,273 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-02T15:19:43,273 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T15:19:43,275 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46276, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-02T15:19:43,276 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3d95e05c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T15:19:43,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-02T15:19:43,278 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=be0458f36726,45405,1733152773787, seqNum=-1] 2024-12-02T15:19:43,279 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T15:19:43,280 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33694, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
2024-12-02T15:19:43,283 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemStateWithSplitRegion', locateType=CURRENT is [region=hbase:acl,,1733152776657.82c95700d9ff2efea2507bd84223c4b0., hostname=be0458f36726,43025,1733152773677, seqNum=2] 2024-12-02T15:19:43,284 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T15:19:43,285 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55586, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T15:19:43,286 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441. 2024-12-02T15:19:43,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at 
org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-02T15:19:43,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T15:19:43,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T15:19:43,287 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-02T15:19:43,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithSplitRegion], kv [jenkins: RWXCA] 2024-12-02T15:19:43,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-12-02T15:19:43,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] procedure2.ProcedureExecutor(1139): Stored pid=12, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } 2024-12-02T15:19:43,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 }, snapshot procedure id = 12 2024-12-02T15:19:43,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=12 2024-12-02T15:19:43,299 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-02T15:19:43,304 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-02T15:19:43,316 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-02T15:19:43,328 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073741845_1021 (size=215) 2024-12-02T15:19:43,328 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073741845_1021 (size=215) 2024-12-02T15:19:43,328 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073741845_1021 (size=215) 2024-12-02T15:19:43,330 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=12, 
state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-02T15:19:43,333 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=13, ppid=12, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 00df70f91cea80c6193fbc6c5ad9d3b9}, {pid=14, ppid=12, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure fb3d55cc4b4cfd19ef91f04ca0bd2813}] 2024-12-02T15:19:43,337 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=13, ppid=12, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 00df70f91cea80c6193fbc6c5ad9d3b9 2024-12-02T15:19:43,337 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=14, ppid=12, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure fb3d55cc4b4cfd19ef91f04ca0bd2813 2024-12-02T15:19:43,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=12 2024-12-02T15:19:43,495 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35155 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=13 2024-12-02T15:19:43,495 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45405 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=14 2024-12-02T15:19:43,495 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=14}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithSplitRegion,1,1733152782066.fb3d55cc4b4cfd19ef91f04ca0bd2813. 2024-12-02T15:19:43,496 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=13}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithSplitRegion,,1733152782066.00df70f91cea80c6193fbc6c5ad9d3b9. 2024-12-02T15:19:43,502 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=13}] regionserver.HRegion(2603): Flush status journal for 00df70f91cea80c6193fbc6c5ad9d3b9: 2024-12-02T15:19:43,502 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=14}] regionserver.HRegion(2603): Flush status journal for fb3d55cc4b4cfd19ef91f04ca0bd2813: 2024-12-02T15:19:43,502 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=13}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithSplitRegion,,1733152782066.00df70f91cea80c6193fbc6c5ad9d3b9. for emptySnaptb0-testExportFileSystemStateWithSplitRegion completed. 2024-12-02T15:19:43,502 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=14}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithSplitRegion,1,1733152782066.fb3d55cc4b4cfd19ef91f04ca0bd2813. for emptySnaptb0-testExportFileSystemStateWithSplitRegion completed. 
2024-12-02T15:19:43,504 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=14}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithSplitRegion,1,1733152782066.fb3d55cc4b4cfd19ef91f04ca0bd2813.' region-info for snapshot=emptySnaptb0-testExportFileSystemStateWithSplitRegion 2024-12-02T15:19:43,504 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=13}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithSplitRegion,,1733152782066.00df70f91cea80c6193fbc6c5ad9d3b9.' region-info for snapshot=emptySnaptb0-testExportFileSystemStateWithSplitRegion 2024-12-02T15:19:43,507 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=14}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-02T15:19:43,507 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=13}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-02T15:19:43,510 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=14}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-02T15:19:43,510 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=13}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-02T15:19:43,533 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073741847_1023 (size=86) 2024-12-02T15:19:43,533 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073741847_1023 (size=86) 2024-12-02T15:19:43,534 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073741847_1023 (size=86) 2024-12-02T15:19:43,534 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=13}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithSplitRegion,,1733152782066.00df70f91cea80c6193fbc6c5ad9d3b9. 2024-12-02T15:19:43,536 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073741846_1022 (size=86) 2024-12-02T15:19:43,536 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073741846_1022 (size=86) 2024-12-02T15:19:43,537 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073741846_1022 (size=86) 2024-12-02T15:19:43,537 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=13 2024-12-02T15:19:43,537 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=14}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithSplitRegion,1,1733152782066.fb3d55cc4b4cfd19ef91f04ca0bd2813. 
2024-12-02T15:19:43,537 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=14}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=14 2024-12-02T15:19:43,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] master.HMaster(4169): Remote procedure done, pid=13 2024-12-02T15:19:43,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.HMaster(4169): Remote procedure done, pid=14 2024-12-02T15:19:43,539 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemStateWithSplitRegion on region fb3d55cc4b4cfd19ef91f04ca0bd2813 2024-12-02T15:19:43,539 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemStateWithSplitRegion on region 00df70f91cea80c6193fbc6c5ad9d3b9 2024-12-02T15:19:43,540 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=14, ppid=12, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure fb3d55cc4b4cfd19ef91f04ca0bd2813 2024-12-02T15:19:43,540 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=13, ppid=12, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 00df70f91cea80c6193fbc6c5ad9d3b9 2024-12-02T15:19:43,548 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=14, ppid=12, state=SUCCESS, hasLock=false; SnapshotRegionProcedure fb3d55cc4b4cfd19ef91f04ca0bd2813 in 208 msec 2024-12-02T15:19:43,551 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=13, resume processing ppid=12 2024-12-02T15:19:43,551 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-02T15:19:43,551 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=13, ppid=12, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 00df70f91cea80c6193fbc6c5ad9d3b9 in 208 msec 2024-12-02T15:19:43,554 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-02T15:19:43,558 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-02T15:19:43,558 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportFileSystemStateWithSplitRegion 2024-12-02T15:19:43,561 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifestV1(130): No regions under 
directory:hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemStateWithSplitRegion 2024-12-02T15:19:43,581 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073741848_1024 (size=597) 2024-12-02T15:19:43,581 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073741848_1024 (size=597) 2024-12-02T15:19:43,583 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073741848_1024 (size=597) 2024-12-02T15:19:43,591 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-02T15:19:43,605 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-02T15:19:43,606 DEBUG [PEWorker-5 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemStateWithSplitRegion to hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/.hbase-snapshot/emptySnaptb0-testExportFileSystemStateWithSplitRegion 2024-12-02T15:19:43,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=12 2024-12-02T15:19:43,616 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-02T15:19:43,616 DEBUG [PEWorker-5 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 }, snapshot procedure id = 12 2024-12-02T15:19:43,620 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=12, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } in 324 msec 2024-12-02T15:19:43,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=12 2024-12-02T15:19:43,924 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemStateWithSplitRegion completed 2024-12-02T15:19:43,938 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] 
client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithSplitRegion', row='03079cf2dc85248d457af1fa22df96e20', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithSplitRegion,,1733152782066.00df70f91cea80c6193fbc6c5ad9d3b9., hostname=be0458f36726,35155,1733152773848, seqNum=2] 2024-12-02T15:19:43,940 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithSplitRegion', row='1224bf97f9facde458819cb26c06b94f1', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithSplitRegion,1,1733152782066.fb3d55cc4b4cfd19ef91f04ca0bd2813., hostname=be0458f36726,45405,1733152773787, seqNum=2] 2024-12-02T15:19:43,942 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithSplitRegion', row='2d8c761b29e2978a8cc8ab72a6666292f', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithSplitRegion,1,1733152782066.fb3d55cc4b4cfd19ef91f04ca0bd2813., hostname=be0458f36726,45405,1733152773787, seqNum=2] 2024-12-02T15:19:43,944 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithSplitRegion', row='3ea1f11b744d19977ade6d66ac26970dc', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithSplitRegion,1,1733152782066.fb3d55cc4b4cfd19ef91f04ca0bd2813., hostname=be0458f36726,45405,1733152773787, seqNum=2] 2024-12-02T15:19:43,945 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithSplitRegion', row='5fe1b2158112b349c979f214e81534a54', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithSplitRegion,1,1733152782066.fb3d55cc4b4cfd19ef91f04ca0bd2813., hostname=be0458f36726,45405,1733152773787, seqNum=2] 2024-12-02T15:19:43,946 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithSplitRegion', row='48ff400d1f6e8d4f4b60b66971b5208a1', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithSplitRegion,1,1733152782066.fb3d55cc4b4cfd19ef91f04ca0bd2813., hostname=be0458f36726,45405,1733152773787, seqNum=2] 2024-12-02T15:19:43,947 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T15:19:43,949 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50966, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T15:19:43,951 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35155 {}] regionserver.HRegion(8528): writing data to region testtb-testExportFileSystemStateWithSplitRegion,,1733152782066.00df70f91cea80c6193fbc6c5ad9d3b9. with WAL disabled. Data may be lost in the event of a crash. 2024-12-02T15:19:43,953 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45405 {}] regionserver.HRegion(8528): writing data to region testtb-testExportFileSystemStateWithSplitRegion,1,1733152782066.fb3d55cc4b4cfd19ef91f04ca0bd2813. with WAL disabled. Data may be lost in the event of a crash. 
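The two HRegion(8528) warnings above show the test loading rows with the WAL disabled. A hypothetical client-side sketch of a write that produces that warning follows; the row key, qualifier and value are invented, while the column family 'cf' matches the family descriptor logged when the regions were opened.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Durability;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class SkipWalPut {
      public static void main(String[] args) throws Exception {
        TableName tn = TableName.valueOf("testtb-testExportFileSystemStateWithSplitRegion");
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Table table = conn.getTable(tn)) {
          Put put = new Put(Bytes.toBytes("row-0"));                         // hypothetical row key
          put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value"));
          put.setDurability(Durability.SKIP_WAL);                            // triggers the "with WAL disabled" warning
          table.put(put);
        }
      }
    }
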
2024-12-02T15:19:43,956 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSplitRegion,, stopping at row=testtb-testExportFileSystemStateWithSplitRegion ,, for max=2147483647 with caching=100 2024-12-02T15:19:43,961 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportFileSystemStateWithSplitRegion 2024-12-02T15:19:43,962 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportFileSystemStateWithSplitRegion,,1733152782066.00df70f91cea80c6193fbc6c5ad9d3b9. 2024-12-02T15:19:43,963 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-02T15:19:43,965 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSplitRegion,, stopping at row=testtb-testExportFileSystemStateWithSplitRegion ,, for max=2147483647 with caching=100 2024-12-02T15:19:43,980 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSplitRegion,, stopping at row=testtb-testExportFileSystemStateWithSplitRegion ,, for max=2147483647 with caching=100 2024-12-02T15:19:43,988 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSplitRegion,, stopping at row=testtb-testExportFileSystemStateWithSplitRegion ,, for max=2147483647 with caching=100 2024-12-02T15:19:43,995 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } 2024-12-02T15:19:43,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733152783995 (current time:1733152783995). 
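At this point the first (empty) snapshot has completed and the second snapshot request (snaptb0-testExportFileSystemStateWithSplitRegion) has just been received. A small illustrative sketch, under the same assumptions as the snippets above, of listing completed snapshots from a client to confirm the first one is visible:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.SnapshotDescription;

    public class ListSnapshots {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          for (SnapshotDescription sd : admin.listSnapshots()) {
            // expected to include emptySnaptb0-testExportFileSystemStateWithSplitRegion at this point
            System.out.println(sd.getName());
          }
        }
      }
    }
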
2024-12-02T15:19:43,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-02T15:19:43,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportFileSystemStateWithSplitRegion VERSION not specified, setting to 2 2024-12-02T15:19:43,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-02T15:19:43,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@ac403a5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T15:19:43,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] client.ClusterIdFetcher(90): Going to request be0458f36726,34441,-1 for getting cluster id 2024-12-02T15:19:43,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-02T15:19:43,997 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '8e50677f-f1c7-447a-8806-ffbc6b162076' 2024-12-02T15:19:43,997 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-02T15:19:43,998 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "8e50677f-f1c7-447a-8806-ffbc6b162076" 2024-12-02T15:19:43,998 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7fd3d177, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T15:19:43,998 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [be0458f36726,34441,-1] 2024-12-02T15:19:43,998 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-02T15:19:43,999 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T15:19:44,000 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36582, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-02T15:19:44,001 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5c21229b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T15:19:44,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-02T15:19:44,002 DEBUG 
[MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=be0458f36726,45405,1733152773787, seqNum=-1] 2024-12-02T15:19:44,003 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T15:19:44,004 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37250, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T15:19:44,005 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441. 2024-12-02T15:19:44,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-02T15:19:44,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T15:19:44,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T15:19:44,006 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-12-02T15:19:44,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@c8078fc, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T15:19:44,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] client.ClusterIdFetcher(90): Going to request be0458f36726,34441,-1 for getting cluster id 2024-12-02T15:19:44,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-02T15:19:44,008 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '8e50677f-f1c7-447a-8806-ffbc6b162076' 2024-12-02T15:19:44,008 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-02T15:19:44,008 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "8e50677f-f1c7-447a-8806-ffbc6b162076" 2024-12-02T15:19:44,008 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@58e4cd1b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T15:19:44,008 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [be0458f36726,34441,-1] 2024-12-02T15:19:44,009 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-02T15:19:44,009 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T15:19:44,010 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36602, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-02T15:19:44,011 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@611b07b7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T15:19:44,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-02T15:19:44,013 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=be0458f36726,45405,1733152773787, seqNum=-1] 2024-12-02T15:19:44,013 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T15:19:44,014 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37262, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
2024-12-02T15:19:44,016 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemStateWithSplitRegion', locateType=CURRENT is [region=hbase:acl,,1733152776657.82c95700d9ff2efea2507bd84223c4b0., hostname=be0458f36726,43025,1733152773677, seqNum=2] 2024-12-02T15:19:44,017 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T15:19:44,018 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44454, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T15:19:44,019 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441. 2024-12-02T15:19:44,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at 
org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-02T15:19:44,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T15:19:44,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T15:19:44,020 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-02T15:19:44,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithSplitRegion], kv [jenkins: RWXCA] 2024-12-02T15:19:44,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-12-02T15:19:44,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] procedure2.ProcedureExecutor(1139): Stored pid=15, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } 2024-12-02T15:19:44,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 }, snapshot procedure id = 15 2024-12-02T15:19:44,025 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-02T15:19:44,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=15 2024-12-02T15:19:44,026 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-02T15:19:44,031 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-02T15:19:44,046 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073741849_1025 (size=210) 2024-12-02T15:19:44,047 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073741849_1025 (size=210) 2024-12-02T15:19:44,048 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073741849_1025 (size=210) 2024-12-02T15:19:44,050 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; 
org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-02T15:19:44,051 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=16, ppid=15, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 00df70f91cea80c6193fbc6c5ad9d3b9}, {pid=17, ppid=15, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure fb3d55cc4b4cfd19ef91f04ca0bd2813}] 2024-12-02T15:19:44,053 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=16, ppid=15, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 00df70f91cea80c6193fbc6c5ad9d3b9 2024-12-02T15:19:44,053 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=17, ppid=15, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure fb3d55cc4b4cfd19ef91f04ca0bd2813 2024-12-02T15:19:44,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=15 2024-12-02T15:19:44,206 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45405 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=17 2024-12-02T15:19:44,206 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35155 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=16 2024-12-02T15:19:44,207 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithSplitRegion,1,1733152782066.fb3d55cc4b4cfd19ef91f04ca0bd2813. 2024-12-02T15:19:44,209 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithSplitRegion,,1733152782066.00df70f91cea80c6193fbc6c5ad9d3b9. 
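The entries above show the master-side fan-out of a FLUSH snapshot: SnapshotProcedure pid=15 spawns one SnapshotRegionProcedure per region (pid=16 and pid=17), and each region server runs a SnapshotRegionCallable that flushes its region before adding it to the snapshot manifest. On the client side such a snapshot is normally requested with a single Admin call; the following is a minimal sketch under an assumed client configuration (not necessarily how this particular test harness drives it), using the snapshot and table names that appear in this log:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.SnapshotType;

    public class TakeFlushSnapshot {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Blocks until the master-side SnapshotProcedure finishes; the repeated
          // "Checking to see if procedure is done pid=15" entries in this log are
          // the master answering that kind of client polling.
          admin.snapshot("snaptb0-testExportFileSystemStateWithSplitRegion",
              TableName.valueOf("testtb-testExportFileSystemStateWithSplitRegion"),
              SnapshotType.FLUSH);
        }
      }
    }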
2024-12-02T15:19:44,210 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.HRegion(2902): Flushing 00df70f91cea80c6193fbc6c5ad9d3b9 1/1 column families, dataSize=333 B heapSize=976 B 2024-12-02T15:19:44,210 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.HRegion(2902): Flushing fb3d55cc4b4cfd19ef91f04ca0bd2813 1/1 column families, dataSize=2.93 KB heapSize=6.58 KB 2024-12-02T15:19:44,290 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportFileSystemStateWithSplitRegion/00df70f91cea80c6193fbc6c5ad9d3b9/.tmp/cf/17d168090aeb4ea89199d70ca4ff63bb is 71, key is 0760b7476fff6b37bc541842c8cb7aec/cf:q/1733152783951/Put/seqid=0 2024-12-02T15:19:44,290 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportFileSystemStateWithSplitRegion/fb3d55cc4b4cfd19ef91f04ca0bd2813/.tmp/cf/ea734200e4034710856ca8335dabc7e6 is 71, key is 10cc39cc06c4410a6e0d6a5eecab2e50/cf:q/1733152783952/Put/seqid=0 2024-12-02T15:19:44,312 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073741850_1026 (size=8188) 2024-12-02T15:19:44,312 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073741850_1026 (size=8188) 2024-12-02T15:19:44,315 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073741850_1026 (size=8188) 2024-12-02T15:19:44,316 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.93 KB at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportFileSystemStateWithSplitRegion/fb3d55cc4b4cfd19ef91f04ca0bd2813/.tmp/cf/ea734200e4034710856ca8335dabc7e6 2024-12-02T15:19:44,323 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073741851_1027 (size=5424) 2024-12-02T15:19:44,324 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073741851_1027 (size=5424) 2024-12-02T15:19:44,324 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073741851_1027 (size=5424) 2024-12-02T15:19:44,325 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=333 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportFileSystemStateWithSplitRegion/00df70f91cea80c6193fbc6c5ad9d3b9/.tmp/cf/17d168090aeb4ea89199d70ca4ff63bb 2024-12-02T15:19:44,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] master.MasterRpcServices(1377): Checking to see 
if procedure is done pid=15 2024-12-02T15:19:44,392 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportFileSystemStateWithSplitRegion/00df70f91cea80c6193fbc6c5ad9d3b9/.tmp/cf/17d168090aeb4ea89199d70ca4ff63bb as hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportFileSystemStateWithSplitRegion/00df70f91cea80c6193fbc6c5ad9d3b9/cf/17d168090aeb4ea89199d70ca4ff63bb 2024-12-02T15:19:44,392 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportFileSystemStateWithSplitRegion/fb3d55cc4b4cfd19ef91f04ca0bd2813/.tmp/cf/ea734200e4034710856ca8335dabc7e6 as hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportFileSystemStateWithSplitRegion/fb3d55cc4b4cfd19ef91f04ca0bd2813/cf/ea734200e4034710856ca8335dabc7e6 2024-12-02T15:19:44,409 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportFileSystemStateWithSplitRegion/00df70f91cea80c6193fbc6c5ad9d3b9/cf/17d168090aeb4ea89199d70ca4ff63bb, entries=5, sequenceid=6, filesize=5.3 K 2024-12-02T15:19:44,409 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportFileSystemStateWithSplitRegion/fb3d55cc4b4cfd19ef91f04ca0bd2813/cf/ea734200e4034710856ca8335dabc7e6, entries=45, sequenceid=6, filesize=8.0 K 2024-12-02T15:19:44,420 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.HRegion(3140): Finished flush of dataSize ~333 B/333, heapSize ~960 B/960, currentSize=0 B/0 for 00df70f91cea80c6193fbc6c5ad9d3b9 in 205ms, sequenceid=6, compaction requested=false 2024-12-02T15:19:44,420 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.HRegion(3140): Finished flush of dataSize ~2.93 KB/3003, heapSize ~6.56 KB/6720, currentSize=0 B/0 for fb3d55cc4b4cfd19ef91f04ca0bd2813 in 207ms, sequenceid=6, compaction requested=false 2024-12-02T15:19:44,421 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportFileSystemStateWithSplitRegion' 2024-12-02T15:19:44,421 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportFileSystemStateWithSplitRegion' 2024-12-02T15:19:44,422 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.HRegion(2603): Flush status journal for fb3d55cc4b4cfd19ef91f04ca0bd2813: 2024-12-02T15:19:44,422 DEBUG 
[RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.HRegion(2603): Flush status journal for 00df70f91cea80c6193fbc6c5ad9d3b9: 2024-12-02T15:19:44,423 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithSplitRegion,1,1733152782066.fb3d55cc4b4cfd19ef91f04ca0bd2813. for snaptb0-testExportFileSystemStateWithSplitRegion completed. 2024-12-02T15:19:44,423 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithSplitRegion,,1733152782066.00df70f91cea80c6193fbc6c5ad9d3b9. for snaptb0-testExportFileSystemStateWithSplitRegion completed. 2024-12-02T15:19:44,423 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithSplitRegion,1,1733152782066.fb3d55cc4b4cfd19ef91f04ca0bd2813.' region-info for snapshot=snaptb0-testExportFileSystemStateWithSplitRegion 2024-12-02T15:19:44,423 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-02T15:19:44,423 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportFileSystemStateWithSplitRegion/fb3d55cc4b4cfd19ef91f04ca0bd2813/cf/ea734200e4034710856ca8335dabc7e6] hfiles 2024-12-02T15:19:44,425 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithSplitRegion,,1733152782066.00df70f91cea80c6193fbc6c5ad9d3b9.' 
region-info for snapshot=snaptb0-testExportFileSystemStateWithSplitRegion 2024-12-02T15:19:44,425 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-02T15:19:44,425 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportFileSystemStateWithSplitRegion/00df70f91cea80c6193fbc6c5ad9d3b9/cf/17d168090aeb4ea89199d70ca4ff63bb] hfiles 2024-12-02T15:19:44,427 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportFileSystemStateWithSplitRegion/00df70f91cea80c6193fbc6c5ad9d3b9/cf/17d168090aeb4ea89199d70ca4ff63bb for snapshot=snaptb0-testExportFileSystemStateWithSplitRegion 2024-12-02T15:19:44,428 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportFileSystemStateWithSplitRegion/fb3d55cc4b4cfd19ef91f04ca0bd2813/cf/ea734200e4034710856ca8335dabc7e6 for snapshot=snaptb0-testExportFileSystemStateWithSplitRegion 2024-12-02T15:19:44,469 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073741852_1028 (size=125) 2024-12-02T15:19:44,469 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073741852_1028 (size=125) 2024-12-02T15:19:44,470 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073741852_1028 (size=125) 2024-12-02T15:19:44,471 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithSplitRegion,1,1733152782066.fb3d55cc4b4cfd19ef91f04ca0bd2813. 
2024-12-02T15:19:44,471 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=17 2024-12-02T15:19:44,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] master.HMaster(4169): Remote procedure done, pid=17 2024-12-02T15:19:44,472 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemStateWithSplitRegion on region fb3d55cc4b4cfd19ef91f04ca0bd2813 2024-12-02T15:19:44,472 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=17, ppid=15, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure fb3d55cc4b4cfd19ef91f04ca0bd2813 2024-12-02T15:19:44,483 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=17, ppid=15, state=SUCCESS, hasLock=false; SnapshotRegionProcedure fb3d55cc4b4cfd19ef91f04ca0bd2813 in 425 msec 2024-12-02T15:19:44,495 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073741853_1029 (size=125) 2024-12-02T15:19:44,517 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073741853_1029 (size=125) 2024-12-02T15:19:44,529 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073741853_1029 (size=125) 2024-12-02T15:19:44,532 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithSplitRegion,,1733152782066.00df70f91cea80c6193fbc6c5ad9d3b9. 
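Once the remaining steps below consolidate the per-region manifests and move the snapshot out of the .tmp directory, it becomes visible to clients. A small helper like the following (a sketch reusing an Admin handle obtained as in the previous example) could be used to confirm the snapshot is listed:

    import java.io.IOException;
    import java.util.regex.Pattern;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.SnapshotDescription;

    // Prints snapshots whose names match the "snaptb0-" prefix used by this test.
    static void printMatchingSnapshots(Admin admin) throws IOException {
      for (SnapshotDescription sd : admin.listSnapshots(Pattern.compile("snaptb0-.*"))) {
        System.out.println(sd.getName() + " -> " + sd.getTableName());
      }
    }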
2024-12-02T15:19:44,532 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=16 2024-12-02T15:19:44,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] master.HMaster(4169): Remote procedure done, pid=16 2024-12-02T15:19:44,534 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemStateWithSplitRegion on region 00df70f91cea80c6193fbc6c5ad9d3b9 2024-12-02T15:19:44,534 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=16, ppid=15, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 00df70f91cea80c6193fbc6c5ad9d3b9 2024-12-02T15:19:44,548 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=16, resume processing ppid=15 2024-12-02T15:19:44,548 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-02T15:19:44,548 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=16, ppid=15, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 00df70f91cea80c6193fbc6c5ad9d3b9 in 493 msec 2024-12-02T15:19:44,550 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-02T15:19:44,551 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-02T15:19:44,552 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportFileSystemStateWithSplitRegion 2024-12-02T15:19:44,553 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithSplitRegion 2024-12-02T15:19:44,585 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073741854_1030 (size=675) 2024-12-02T15:19:44,586 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073741854_1030 (size=675) 2024-12-02T15:19:44,587 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073741854_1030 (size=675) 2024-12-02T15:19:44,592 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ 
ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-02T15:19:44,608 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-02T15:19:44,609 DEBUG [PEWorker-2 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithSplitRegion to hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSplitRegion 2024-12-02T15:19:44,612 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-02T15:19:44,612 DEBUG [PEWorker-2 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 }, snapshot procedure id = 15 2024-12-02T15:19:44,615 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=15, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } in 591 msec 2024-12-02T15:19:44,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=15 2024-12-02T15:19:44,654 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemStateWithSplitRegion completed 2024-12-02T15:19:44,681 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-02T15:19:44,685 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-02T15:19:44,687 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-02T15:19:44,687 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50982, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-02T15:19:44,689 INFO [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35155 {}] regionserver.CompactSplit(323): Interrupting running compactions because user switched off compactions 2024-12-02T15:19:44,691 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44466, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-02T15:19:44,691 INFO [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43025 
{}] regionserver.CompactSplit(323): Interrupting running compactions because user switched off compactions 2024-12-02T15:19:44,692 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37270, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-02T15:19:44,692 INFO [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45405 {}] regionserver.CompactSplit(323): Interrupting running compactions because user switched off compactions 2024-12-02T15:19:44,696 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testExportFileSystemStateWithSplitRegion', {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-02T15:19:44,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] procedure2.ProcedureExecutor(1139): Stored pid=18, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testExportFileSystemStateWithSplitRegion 2024-12-02T15:19:44,703 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=18, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testExportFileSystemStateWithSplitRegion execute state=CREATE_TABLE_PRE_OPERATION 2024-12-02T15:19:44,703 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T15:19:44,703 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testExportFileSystemStateWithSplitRegion" procId is: 18 2024-12-02T15:19:44,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=18 2024-12-02T15:19:44,706 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=18, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testExportFileSystemStateWithSplitRegion execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-02T15:19:44,742 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073741855_1031 (size=390) 2024-12-02T15:19:44,742 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073741855_1031 (size=390) 2024-12-02T15:19:44,742 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073741855_1031 (size=390) 2024-12-02T15:19:44,746 INFO [RegionOpenAndInit-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 3feda63ca2050c61bf3633c0ebd2c0ff, NAME => 'testExportFileSystemStateWithSplitRegion,,1733152784696.3feda63ca2050c61bf3633c0ebd2c0ff.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='testExportFileSystemStateWithSplitRegion', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 
'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657 2024-12-02T15:19:44,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=18 2024-12-02T15:19:44,818 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073741856_1032 (size=75) 2024-12-02T15:19:44,820 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073741856_1032 (size=75) 2024-12-02T15:19:44,820 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073741856_1032 (size=75) 2024-12-02T15:19:44,821 DEBUG [RegionOpenAndInit-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(898): Instantiated testExportFileSystemStateWithSplitRegion,,1733152784696.3feda63ca2050c61bf3633c0ebd2c0ff.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T15:19:44,822 DEBUG [RegionOpenAndInit-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1722): Closing 3feda63ca2050c61bf3633c0ebd2c0ff, disabling compactions & flushes 2024-12-02T15:19:44,822 INFO [RegionOpenAndInit-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1755): Closing region testExportFileSystemStateWithSplitRegion,,1733152784696.3feda63ca2050c61bf3633c0ebd2c0ff. 2024-12-02T15:19:44,822 DEBUG [RegionOpenAndInit-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testExportFileSystemStateWithSplitRegion,,1733152784696.3feda63ca2050c61bf3633c0ebd2c0ff. 2024-12-02T15:19:44,822 DEBUG [RegionOpenAndInit-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testExportFileSystemStateWithSplitRegion,,1733152784696.3feda63ca2050c61bf3633c0ebd2c0ff. after waiting 0 ms 2024-12-02T15:19:44,822 DEBUG [RegionOpenAndInit-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testExportFileSystemStateWithSplitRegion,,1733152784696.3feda63ca2050c61bf3633c0ebd2c0ff. 2024-12-02T15:19:44,822 INFO [RegionOpenAndInit-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1973): Closed testExportFileSystemStateWithSplitRegion,,1733152784696.3feda63ca2050c61bf3633c0ebd2c0ff. 
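The create request logged just above asks for table 'testExportFileSystemStateWithSplitRegion' with a single 'cf' family, one version, a ROW bloom filter and 64 KB blocks. Built programmatically, the equivalent descriptor and a blocking create look roughly like this (a sketch assuming an Admin obtained as in the earlier snapshot example):

    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    static void createSplitTestTable(Admin admin) throws IOException {
      TableDescriptorBuilder table = TableDescriptorBuilder
          .newBuilder(TableName.valueOf("testExportFileSystemStateWithSplitRegion"))
          .setColumnFamily(ColumnFamilyDescriptorBuilder
              .newBuilder(Bytes.toBytes("cf"))
              .setMaxVersions(1)                 // VERSIONS => '1'
              .setBloomFilterType(BloomType.ROW) // BLOOMFILTER => 'ROW'
              .setBlocksize(64 * 1024)           // BLOCKSIZE => '65536'
              .build());
      // Returns only after the CreateTableProcedure finishes, i.e. after the
      // region shown below has been assigned and the table is ENABLED in hbase:meta.
      admin.createTable(table.build());
    }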
2024-12-02T15:19:44,823 DEBUG [RegionOpenAndInit-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1676): Region close journal for 3feda63ca2050c61bf3633c0ebd2c0ff: Waiting for close lock at 1733152784821Disabling compacts and flushes for region at 1733152784821Disabling writes for close at 1733152784822 (+1 ms)Writing region close event to WAL at 1733152784822Closed at 1733152784822 2024-12-02T15:19:44,825 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=18, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testExportFileSystemStateWithSplitRegion execute state=CREATE_TABLE_ADD_TO_META 2024-12-02T15:19:44,826 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testExportFileSystemStateWithSplitRegion,,1733152784696.3feda63ca2050c61bf3633c0ebd2c0ff.","families":{"info":[{"qualifier":"regioninfo","vlen":74,"tag":[],"timestamp":"1733152784826"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733152784826"}]},"ts":"1733152784826"} 2024-12-02T15:19:44,832 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 2024-12-02T15:19:44,834 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=18, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testExportFileSystemStateWithSplitRegion execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-02T15:19:44,835 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportFileSystemStateWithSplitRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733152784835"}]},"ts":"1733152784835"} 2024-12-02T15:19:44,840 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportFileSystemStateWithSplitRegion, state=ENABLING in hbase:meta 2024-12-02T15:19:44,840 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(204): Hosts are {be0458f36726=0} racks are {/default-rack=0} 2024-12-02T15:19:44,842 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-02T15:19:44,843 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-02T15:19:44,843 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-02T15:19:44,843 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-02T15:19:44,843 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-02T15:19:44,843 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-02T15:19:44,843 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-02T15:19:44,843 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-02T15:19:44,843 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-02T15:19:44,843 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-02T15:19:44,843 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=19, ppid=18, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=3feda63ca2050c61bf3633c0ebd2c0ff, ASSIGN}] 2024-12-02T15:19:44,846 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=19, ppid=18, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, 
hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=3feda63ca2050c61bf3633c0ebd2c0ff, ASSIGN 2024-12-02T15:19:44,850 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=19, ppid=18, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=3feda63ca2050c61bf3633c0ebd2c0ff, ASSIGN; state=OFFLINE, location=be0458f36726,43025,1733152773677; forceNewPlan=false, retain=false 2024-12-02T15:19:45,001 INFO [be0458f36726:34441 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 2024-12-02T15:19:45,001 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=19 updating hbase:meta row=3feda63ca2050c61bf3633c0ebd2c0ff, regionState=OPENING, regionLocation=be0458f36726,43025,1733152773677 2024-12-02T15:19:45,005 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=19, ppid=18, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=3feda63ca2050c61bf3633c0ebd2c0ff, ASSIGN because future has completed 2024-12-02T15:19:45,006 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=20, ppid=19, state=RUNNABLE, hasLock=false; OpenRegionProcedure 3feda63ca2050c61bf3633c0ebd2c0ff, server=be0458f36726,43025,1733152773677}] 2024-12-02T15:19:45,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=18 2024-12-02T15:19:45,164 INFO [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] handler.AssignRegionHandler(132): Open testExportFileSystemStateWithSplitRegion,,1733152784696.3feda63ca2050c61bf3633c0ebd2c0ff. 2024-12-02T15:19:45,165 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(7752): Opening region: {ENCODED => 3feda63ca2050c61bf3633c0ebd2c0ff, NAME => 'testExportFileSystemStateWithSplitRegion,,1733152784696.3feda63ca2050c61bf3633c0ebd2c0ff.', STARTKEY => '', ENDKEY => ''} 2024-12-02T15:19:45,165 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(8280): Registered coprocessor service: region=testExportFileSystemStateWithSplitRegion,,1733152784696.3feda63ca2050c61bf3633c0ebd2c0ff. service=AccessControlService 2024-12-02T15:19:45,165 INFO [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-02T15:19:45,165 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testExportFileSystemStateWithSplitRegion 3feda63ca2050c61bf3633c0ebd2c0ff 2024-12-02T15:19:45,165 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(898): Instantiated testExportFileSystemStateWithSplitRegion,,1733152784696.3feda63ca2050c61bf3633c0ebd2c0ff.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T15:19:45,166 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(7794): checking encryption for 3feda63ca2050c61bf3633c0ebd2c0ff 2024-12-02T15:19:45,166 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(7797): checking classloading for 3feda63ca2050c61bf3633c0ebd2c0ff 2024-12-02T15:19:45,168 INFO [StoreOpener-3feda63ca2050c61bf3633c0ebd2c0ff-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 3feda63ca2050c61bf3633c0ebd2c0ff 2024-12-02T15:19:45,170 INFO [StoreOpener-3feda63ca2050c61bf3633c0ebd2c0ff-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 3feda63ca2050c61bf3633c0ebd2c0ff columnFamilyName cf 2024-12-02T15:19:45,170 DEBUG [StoreOpener-3feda63ca2050c61bf3633c0ebd2c0ff-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T15:19:45,171 INFO [StoreOpener-3feda63ca2050c61bf3633c0ebd2c0ff-1 {}] regionserver.HStore(327): Store=3feda63ca2050c61bf3633c0ebd2c0ff/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T15:19:45,171 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(1038): replaying wal for 3feda63ca2050c61bf3633c0ebd2c0ff 2024-12-02T15:19:45,173 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testExportFileSystemStateWithSplitRegion/3feda63ca2050c61bf3633c0ebd2c0ff 2024-12-02T15:19:45,173 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testExportFileSystemStateWithSplitRegion/3feda63ca2050c61bf3633c0ebd2c0ff 2024-12-02T15:19:45,174 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(1048): stopping wal replay for 3feda63ca2050c61bf3633c0ebd2c0ff 2024-12-02T15:19:45,174 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(1060): Cleaning up temporary data for 3feda63ca2050c61bf3633c0ebd2c0ff 2024-12-02T15:19:45,177 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(1093): writing seq id for 3feda63ca2050c61bf3633c0ebd2c0ff 2024-12-02T15:19:45,180 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testExportFileSystemStateWithSplitRegion/3feda63ca2050c61bf3633c0ebd2c0ff/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-02T15:19:45,181 INFO [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(1114): Opened 3feda63ca2050c61bf3633c0ebd2c0ff; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=70785555, jitterRate=0.05478696525096893}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-02T15:19:45,181 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 3feda63ca2050c61bf3633c0ebd2c0ff 2024-12-02T15:19:45,182 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(1006): Region open journal for 3feda63ca2050c61bf3633c0ebd2c0ff: Running coprocessor pre-open hook at 1733152785166Writing region info on filesystem at 1733152785166Initializing all the Stores at 1733152785167 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733152785167Cleaning up temporary data from old regions at 1733152785174 (+7 ms)Running coprocessor post-open hooks at 1733152785181 (+7 ms)Region opened successfully at 1733152785182 (+1 ms) 2024-12-02T15:19:45,183 INFO [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegionServer(2236): Post open deploy tasks for testExportFileSystemStateWithSplitRegion,,1733152784696.3feda63ca2050c61bf3633c0ebd2c0ff., pid=20, masterSystemTime=1733152785159 2024-12-02T15:19:45,186 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegionServer(2266): Finished post open deploy task for testExportFileSystemStateWithSplitRegion,,1733152784696.3feda63ca2050c61bf3633c0ebd2c0ff. 2024-12-02T15:19:45,186 INFO [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] handler.AssignRegionHandler(153): Opened testExportFileSystemStateWithSplitRegion,,1733152784696.3feda63ca2050c61bf3633c0ebd2c0ff. 
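The open journal above reports a SteppingSplitPolicy whose desiredMaxFileSize is 70785555 bytes with jitterRate 0.0548, consistent with a 64 MiB (67108864 byte) base value plus jitter, presumably the hbase.hregion.max.filesize this mini cluster is configured with. The roughly 320 MB HFile produced later in this section is therefore several times the split threshold, which is what the oversplitting warning at the end of the section refers to. If a larger per-table threshold were wanted, it could be set on the table descriptor; a hypothetical sketch (the 512 MB figure is illustrative, not taken from the log):

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    // Hypothetical per-table override of hbase.hregion.max.filesize, so one large
    // bulk-loaded store file does not immediately push the region past the split size.
    static TableDescriptor withLargerSplitThreshold() {
      return TableDescriptorBuilder
          .newBuilder(TableName.valueOf("testExportFileSystemStateWithSplitRegion"))
          .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"))
          .setMaxFileSize(512L * 1024 * 1024)  // illustrative value, not from the log
          .build();
    }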
2024-12-02T15:19:45,188 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=19 updating hbase:meta row=3feda63ca2050c61bf3633c0ebd2c0ff, regionState=OPEN, openSeqNum=2, regionLocation=be0458f36726,43025,1733152773677 2024-12-02T15:19:45,191 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=20, ppid=19, state=RUNNABLE, hasLock=false; OpenRegionProcedure 3feda63ca2050c61bf3633c0ebd2c0ff, server=be0458f36726,43025,1733152773677 because future has completed 2024-12-02T15:19:45,203 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=20, resume processing ppid=19 2024-12-02T15:19:45,203 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=20, ppid=19, state=SUCCESS, hasLock=false; OpenRegionProcedure 3feda63ca2050c61bf3633c0ebd2c0ff, server=be0458f36726,43025,1733152773677 in 193 msec 2024-12-02T15:19:45,207 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=19, resume processing ppid=18 2024-12-02T15:19:45,207 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=19, ppid=18, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=3feda63ca2050c61bf3633c0ebd2c0ff, ASSIGN in 360 msec 2024-12-02T15:19:45,208 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=18, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testExportFileSystemStateWithSplitRegion execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-02T15:19:45,208 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportFileSystemStateWithSplitRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733152785208"}]},"ts":"1733152785208"} 2024-12-02T15:19:45,211 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportFileSystemStateWithSplitRegion, state=ENABLED in hbase:meta 2024-12-02T15:19:45,213 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=18, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testExportFileSystemStateWithSplitRegion execute state=CREATE_TABLE_POST_OPERATION 2024-12-02T15:19:45,213 DEBUG [PEWorker-5 {}] access.PermissionStorage(177): Writing permission with rowKey testExportFileSystemStateWithSplitRegion jenkins: RWXCA 2024-12-02T15:19:45,217 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43025 {}] access.PermissionStorage(613): Read acl: entry[testExportFileSystemStateWithSplitRegion], kv [jenkins: RWXCA] 2024-12-02T15:19:45,303 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35155-0x10197ce54a90003, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T15:19:45,303 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34441-0x10197ce54a90000, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T15:19:45,303 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43025-0x10197ce54a90001, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T15:19:45,303 DEBUG [pool-69-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45405-0x10197ce54a90002, 
quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T15:19:45,312 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportFileSystemStateWithSplitRegion with data PBUF\x0AN\x0A\x07jenkins\x12C\x08\x03"?\x0A3\x0A\x07default\x12(testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-12-02T15:19:45,312 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportFileSystemStateWithSplitRegion with data PBUF\x0AN\x0A\x07jenkins\x12C\x08\x03"?\x0A3\x0A\x07default\x12(testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-12-02T15:19:45,312 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-12-02T15:19:45,312 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-12-02T15:19:45,313 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportFileSystemStateWithSplitRegion with data PBUF\x0AN\x0A\x07jenkins\x12C\x08\x03"?\x0A3\x0A\x07default\x12(testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-12-02T15:19:45,313 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-12-02T15:19:45,314 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportFileSystemStateWithSplitRegion with data PBUF\x0AN\x0A\x07jenkins\x12C\x08\x03"?\x0A3\x0A\x07default\x12(testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-12-02T15:19:45,314 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-12-02T15:19:45,315 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=18, state=SUCCESS, hasLock=false; CreateTableProcedure table=testExportFileSystemStateWithSplitRegion in 614 msec 2024-12-02T15:19:45,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=18 2024-12-02T15:19:45,333 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testExportFileSystemStateWithSplitRegion completed 2024-12-02T15:19:45,334 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T15:19:45,337 INFO [Time-limited test {}] fs.HFileSystem(339): 
Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-02T15:19:47,627 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073741857_1033 (size=134217728) 2024-12-02T15:19:47,628 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073741857_1033 (size=134217728) 2024-12-02T15:19:47,629 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073741857_1033 (size=134217728) 2024-12-02T15:19:48,081 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-02T15:19:48,235 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testExportFileSystemStateWithSplitRegion' 2024-12-02T15:19:49,603 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073741858_1034 (size=134217728) 2024-12-02T15:19:49,604 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073741858_1034 (size=134217728) 2024-12-02T15:19:49,605 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073741858_1034 (size=134217728) 2024-12-02T15:19:50,149 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/44e720fa-d04f-d823-6564-55fab94a4e60/output/cf/test_file is 35, key is 1\x00\x00\x00/cf:q/1733152785343/Put/seqid=0 2024-12-02T15:19:50,152 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073741859_1035 (size=51979256) 2024-12-02T15:19:50,153 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073741859_1035 (size=51979256) 2024-12-02T15:19:50,153 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073741859_1035 (size=51979256) 2024-12-02T15:19:50,163 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2e6f09ef, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T15:19:50,163 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request be0458f36726,34441,-1 for getting cluster id 2024-12-02T15:19:50,163 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-02T15:19:50,165 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '8e50677f-f1c7-447a-8806-ffbc6b162076' 2024-12-02T15:19:50,166 DEBUG [RPCClient-NioEventLoopGroup-6-9 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-02T15:19:50,166 DEBUG [RPCClient-NioEventLoopGroup-6-9 {}] client.ClusterIdFetcher$1(103): Got connection registry 
info: cluster_id: "8e50677f-f1c7-447a-8806-ffbc6b162076" 2024-12-02T15:19:50,166 DEBUG [RPCClient-NioEventLoopGroup-6-9 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@60b0d803, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T15:19:50,166 DEBUG [RPCClient-NioEventLoopGroup-6-9 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [be0458f36726,34441,-1] 2024-12-02T15:19:50,167 DEBUG [RPCClient-NioEventLoopGroup-6-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-02T15:19:50,167 DEBUG [RPCClient-NioEventLoopGroup-6-9 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T15:19:50,169 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36620, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-02T15:19:50,170 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@f22e0ee, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T15:19:50,171 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-02T15:19:50,173 DEBUG [RPCClient-NioEventLoopGroup-6-10 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=be0458f36726,45405,1733152773787, seqNum=-1] 2024-12-02T15:19:50,173 DEBUG [RPCClient-NioEventLoopGroup-6-10 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T15:19:50,175 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37276, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T15:19:50,188 WARN [Time-limited test {}] tool.BulkLoadHFilesTool$1(330): Trying to bulk load hfile hdfs://localhost:36329/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/44e720fa-d04f-d823-6564-55fab94a4e60/output/cf/test_file with size: 320414712 bytes can be problematic as it may lead to oversplitting. 
2024-12-02T15:19:50,189 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-02T15:19:50,190 DEBUG [RPCClient-NioEventLoopGroup-6-10 {}] client.AsyncConnectionImpl(321): The fetched master address is be0458f36726,34441,1733152772730 2024-12-02T15:19:50,190 DEBUG [RPCClient-NioEventLoopGroup-6-10 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@4c8fc7e0 2024-12-02T15:19:50,190 DEBUG [RPCClient-NioEventLoopGroup-6-10 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-02T15:19:50,196 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36630, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-02T15:19:50,203 WARN [IPC Server handler 4 on default port 36329 {}] namenode.FSNamesystem(6314): trying to get DT with no secret manager running 2024-12-02T15:19:50,209 DEBUG [RPCClient-NioEventLoopGroup-6-11 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testExportFileSystemStateWithSplitRegion', row='', locateType=CURRENT is [region=testExportFileSystemStateWithSplitRegion,,1733152784696.3feda63ca2050c61bf3633c0ebd2c0ff., hostname=be0458f36726,43025,1733152773677, seqNum=2] 2024-12-02T15:19:50,212 DEBUG [RPCClient-NioEventLoopGroup-6-11 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T15:19:50,214 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44478, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T15:19:50,220 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportFileSystemStateWithSplitRegion,, stopping at row=testExportFileSystemStateWithSplitRegion ,, for max=2147483647 with caching=100 2024-12-02T15:19:50,237 INFO [BulkLoadHFilesTool-0 {}] tool.BulkLoadHFilesTool(704): Trying to load hfile=hdfs://localhost:36329/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/44e720fa-d04f-d823-6564-55fab94a4e60/output/cf/test_file first=Optional[1\x00\x00\x00] last=Optional[9\x00\x00\x00] 2024-12-02T15:19:50,274 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45405 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.exceptions.UnknownProtocolException: No registered coprocessor service found for AuthenticationService in region hbase:meta,,1 at org.apache.hadoop.hbase.regionserver.HRegion.execService(HRegion.java:8304) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.execServiceOnRegion(RSRpcServices.java:2441) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.execService(RSRpcServices.java:2415) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43516) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:3.0.0-beta-2-SNAPSHOT] 2024-12-02T15:19:50,279 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45405 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: ExecService size: 101 connection: 172.17.0.2:52543 deadline: 1733152850273, exception=org.apache.hadoop.hbase.exceptions.UnknownProtocolException: No registered coprocessor service found for AuthenticationService in region hbase:meta,,1 2024-12-02T15:19:50,281 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43025 {}] regionserver.SecureBulkLoadManager(227): unable to add token java.util.concurrent.ExecutionException: org.apache.hadoop.hbase.exceptions.UnknownProtocolException: org.apache.hadoop.hbase.exceptions.UnknownProtocolException: No registered coprocessor service found for AuthenticationService in region hbase:meta,,1 at org.apache.hadoop.hbase.regionserver.HRegion.execService(HRegion.java:8304) at org.apache.hadoop.hbase.regionserver.RSRpcServices.execServiceOnRegion(RSRpcServices.java:2441) at org.apache.hadoop.hbase.regionserver.RSRpcServices.execService(RSRpcServices.java:2415) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43516) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at java.util.concurrent.CompletableFuture.reportGet(CompletableFuture.java:396) ~[?:?] at java.util.concurrent.CompletableFuture.get(CompletableFuture.java:2073) ~[?:?] at org.apache.hadoop.hbase.regionserver.SecureBulkLoadManager.secureBulkLoadHFiles(SecureBulkLoadManager.java:221) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.bulkLoadHFile(RSRpcServices.java:2347) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43510) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:3.0.0-beta-2-SNAPSHOT] Caused by: org.apache.hadoop.hbase.exceptions.UnknownProtocolException: org.apache.hadoop.hbase.exceptions.UnknownProtocolException: No registered coprocessor service found for AuthenticationService in region hbase:meta,,1 at org.apache.hadoop.hbase.regionserver.HRegion.execService(HRegion.java:8304) at org.apache.hadoop.hbase.regionserver.RSRpcServices.execServiceOnRegion(RSRpcServices.java:2441) at org.apache.hadoop.hbase.regionserver.RSRpcServices.execService(RSRpcServices.java:2415) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43516) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.ConnectionUtils.translateException(ConnectionUtils.java:219) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.onError(AsyncRpcRetryingCaller.java:165) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.lambda$call$4(AsyncSingleRequestRpcRetryingCaller.java:86) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.lambda$addListener$0(FutureUtils.java:71) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:863) ~[?:?] at java.util.concurrent.CompletableFuture$UniWhenComplete.tryFire(CompletableFuture.java:841) ~[?:?] at java.util.concurrent.CompletableFuture.postComplete(CompletableFuture.java:510) ~[?:?] at java.util.concurrent.CompletableFuture.completeExceptionally(CompletableFuture.java:2162) ~[?:?] 
at org.apache.hadoop.hbase.client.RegionCoprocessorRpcChannelImpl.lambda$rpcCall$0(RegionCoprocessorRpcChannelImpl.java:90) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:56) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:47) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:397) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollStreamChannel$EpollStreamUnsafe.epollInReady(AbstractEpollStreamChannel.java:799) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.processReady(EpollEventLoop.java:501) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:399) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException: org.apache.hadoop.hbase.exceptions.UnknownProtocolException: No registered coprocessor service found for AuthenticationService in region hbase:meta,,1 at org.apache.hadoop.hbase.regionserver.HRegion.execService(HRegion.java:8304) at org.apache.hadoop.hbase.regionserver.RSRpcServices.execServiceOnRegion(RSRpcServices.java:2441) at org.apache.hadoop.hbase.regionserver.RSRpcServices.execService(RSRpcServices.java:2415) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43516) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at 
org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollStreamChannel$EpollStreamUnsafe.epollInReady(AbstractEpollStreamChannel.java:799) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.processReady(EpollEventLoop.java:501) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:399) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T15:19:50,283 WARN [IPC Server handler 4 on default port 36329 {}] namenode.FSNamesystem(6314): trying to get DT with no secret manager running 2024-12-02T15:19:50,312 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43025 {}] regionserver.HStore(614): Validating hfile at hdfs://localhost:36329/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/44e720fa-d04f-d823-6564-55fab94a4e60/output/cf/test_file for inclusion in 3feda63ca2050c61bf3633c0ebd2c0ff/cf 2024-12-02T15:19:50,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43025 {}] regionserver.HStore(626): HFile bounds: first=1\x00\x00\x00 last=9\x00\x00\x00 2024-12-02T15:19:50,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43025 {}] regionserver.HStore(628): Region bounds: first= last= 2024-12-02T15:19:50,331 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43025 {}] regionserver.HStore(641): Trying to bulk load hfile hdfs://localhost:36329/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/44e720fa-d04f-d823-6564-55fab94a4e60/output/cf/test_file with size: 320414712 bytes can be problematic as it may lead to oversplitting. 2024-12-02T15:19:50,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43025 {}] regionserver.HRegion(2603): Flush status journal for 3feda63ca2050c61bf3633c0ebd2c0ff: 2024-12-02T15:19:50,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43025 {}] regionserver.SecureBulkLoadManager$SecureBulkLoadListener(397): Moving hdfs://localhost:36329/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/44e720fa-d04f-d823-6564-55fab94a4e60/output/cf/test_file to hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/staging/jenkins__testExportFileSystemStateWithSplitRegion__cdbovtdadqo0v5nj7p7nvcn394bqh5nm54gjcke1fc5h63an6h37at0r4sgoeugv/cf/test_file 2024-12-02T15:19:50,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43025 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/staging/jenkins__testExportFileSystemStateWithSplitRegion__cdbovtdadqo0v5nj7p7nvcn394bqh5nm54gjcke1fc5h63an6h37at0r4sgoeugv/cf/test_file as hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testExportFileSystemStateWithSplitRegion/3feda63ca2050c61bf3633c0ebd2c0ff/cf/85fbe924ff0f471b8097dd8166a6b0d0_SeqId_4_ 2024-12-02T15:19:50,351 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43025 {}] regionserver.HStore(700): Loaded HFile hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/staging/jenkins__testExportFileSystemStateWithSplitRegion__cdbovtdadqo0v5nj7p7nvcn394bqh5nm54gjcke1fc5h63an6h37at0r4sgoeugv/cf/test_file into 3feda63ca2050c61bf3633c0ebd2c0ff/cf as 
hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testExportFileSystemStateWithSplitRegion/3feda63ca2050c61bf3633c0ebd2c0ff/cf/85fbe924ff0f471b8097dd8166a6b0d0_SeqId_4_ - updating store file list. 2024-12-02T15:19:50,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43025 {}] regionserver.HStoreFile(483): HFile Bloom filter type for 85fbe924ff0f471b8097dd8166a6b0d0_SeqId_4_: NONE, but ROW specified in column family configuration 2024-12-02T15:19:50,386 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43025 {}] regionserver.HStore(722): Loaded HFile hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testExportFileSystemStateWithSplitRegion/3feda63ca2050c61bf3633c0ebd2c0ff/cf/85fbe924ff0f471b8097dd8166a6b0d0_SeqId_4_ into 3feda63ca2050c61bf3633c0ebd2c0ff/cf 2024-12-02T15:19:50,386 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43025 {}] regionserver.HStore(706): Successfully loaded hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/staging/jenkins__testExportFileSystemStateWithSplitRegion__cdbovtdadqo0v5nj7p7nvcn394bqh5nm54gjcke1fc5h63an6h37at0r4sgoeugv/cf/test_file into 3feda63ca2050c61bf3633c0ebd2c0ff/cf (new location: hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testExportFileSystemStateWithSplitRegion/3feda63ca2050c61bf3633c0ebd2c0ff/cf/85fbe924ff0f471b8097dd8166a6b0d0_SeqId_4_) 2024-12-02T15:19:50,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43025 {}] regionserver.SecureBulkLoadManager$SecureBulkLoadListener(412): Bulk Load done for: hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/staging/jenkins__testExportFileSystemStateWithSplitRegion__cdbovtdadqo0v5nj7p7nvcn394bqh5nm54gjcke1fc5h63an6h37at0r4sgoeugv/cf/test_file 2024-12-02T15:19:50,401 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
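[annotation] At this point the bulk load has committed the HFile into 3feda63ca2050c61bf3633c0ebd2c0ff/cf and the tool's connection is being closed. A minimal sketch, purely illustrative and not part of this run, of how a client could read a loaded cell back; the row key, family 'cf' and qualifier 'q' mirror the cell format shown earlier in the log, but the exact key encoding used by the test is an assumption:

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Get;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class ReadBackSketch {
      public static void main(String[] args) throws IOException {
        Configuration conf = HBaseConfiguration.create();
        TableName tn = TableName.valueOf("testExportFileSystemStateWithSplitRegion");
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(tn)) {
          // Row key is an assumed example; cells in the loaded HFile live under cf:q.
          Result r = table.get(new Get(Bytes.toBytes("5")));
          byte[] v = r.getValue(Bytes.toBytes("cf"), Bytes.toBytes("q"));
          System.out.println(v == null ? "no cell" : Bytes.toStringBinary(v));
        }
      }
    }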
2024-12-02T15:19:50,401 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.tool.BulkLoadHFilesTool.bulkLoad(BulkLoadHFilesTool.java:1125) at org.apache.hadoop.hbase.tool.BulkLoadHFilesTool.run(BulkLoadHFilesTool.java:1176) at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.testExportFileSystemStateWithSplitRegion(TestExportSnapshot.java:229) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-02T15:19:50,401 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T15:19:50,401 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T15:19:50,401 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportFileSystemStateWithSplitRegion,, stopping at row=testExportFileSystemStateWithSplitRegion ,, for max=2147483647 with caching=100 2024-12-02T15:19:50,403 DEBUG [RPCClient-NioEventLoopGroup-6-13 {}] client.AsyncRegionLocatorHelper(64): Try updating 
region=testExportFileSystemStateWithSplitRegion,,1733152784696.3feda63ca2050c61bf3633c0ebd2c0ff., hostname=be0458f36726,43025,1733152773677, seqNum=2 , the old value is region=testExportFileSystemStateWithSplitRegion,,1733152784696.3feda63ca2050c61bf3633c0ebd2c0ff., hostname=be0458f36726,43025,1733152773677, seqNum=2, error=org.apache.hadoop.hbase.exceptions.ConnectionClosedException: Call to address=be0458f36726:43025 failed on local exception: org.apache.hadoop.hbase.exceptions.ConnectionClosedException: Connection closed 2024-12-02T15:19:50,404 DEBUG [RPCClient-NioEventLoopGroup-6-13 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=testExportFileSystemStateWithSplitRegion,,1733152784696.3feda63ca2050c61bf3633c0ebd2c0ff., hostname=be0458f36726,43025,1733152773677, seqNum=2 is org.apache.hadoop.hbase.exceptions.ConnectionClosedException: Connection closed 2024-12-02T15:19:50,404 DEBUG [RPCClient-NioEventLoopGroup-6-13 {}] client.AsyncRegionLocatorHelper(88): Try removing region=testExportFileSystemStateWithSplitRegion,,1733152784696.3feda63ca2050c61bf3633c0ebd2c0ff., hostname=be0458f36726,43025,1733152773677, seqNum=2 from cache 2024-12-02T15:19:50,406 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-02T15:19:50,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43025 {}] ipc.CallRunner(93): RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43025: skipped callId: 7 service: ClientService methodName: CleanupBulkLoad size: 336 connection: 172.17.0.2:44478 deadline: 1733152850401 param: TODO: class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$CleanupBulkLoadRequest connection: 172.17.0.2:44478 2024-12-02T15:19:50,410 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testExportFileSystemStateWithSplitRegion', row='5', locateType=CURRENT is [region=testExportFileSystemStateWithSplitRegion,,1733152784696.3feda63ca2050c61bf3633c0ebd2c0ff., hostname=be0458f36726,43025,1733152773677, seqNum=2] 2024-12-02T15:19:50,421 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] master.HMaster$3(2313): Client=jenkins//172.17.0.2 split testExportFileSystemStateWithSplitRegion,,1733152784696.3feda63ca2050c61bf3633c0ebd2c0ff. 
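[annotation] The HMaster line above records the client asking for a split of region 3feda63ca2050c61bf3633c0ebd2c0ff; the daughters that appear later in the log meet at key '5'. A minimal sketch, assuming an explicit split point (the test may instead let the region server pick a midpoint) and an illustrative polling loop rather than the test's own wait utilities:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.RegionLocator;
    import org.apache.hadoop.hbase.util.Bytes;

    public class SplitSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        TableName tn = TableName.valueOf("testExportFileSystemStateWithSplitRegion");
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin();
             RegionLocator locator = conn.getRegionLocator(tn)) {
          // Ask the master to split at '5' (assumed split point for illustration).
          admin.split(tn, Bytes.toBytes("5"));
          // Crude wait: poll until two daughter regions are online.
          while (locator.getAllRegionLocations().size() < 2) {
            Thread.sleep(200);
          }
        }
      }
    }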
2024-12-02T15:19:50,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] assignment.SplitTableRegionProcedure(223): Splittable=true state=OPEN, location=be0458f36726,43025,1733152773677 2024-12-02T15:19:50,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] procedure2.ProcedureExecutor(1139): Stored pid=21, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=testExportFileSystemStateWithSplitRegion, parent=3feda63ca2050c61bf3633c0ebd2c0ff, daughterA=902a36716bdb460286a5cda0e9c63607, daughterB=f157ceef3558c1ef6b99a6ec43b90edd 2024-12-02T15:19:50,437 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=21, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=testExportFileSystemStateWithSplitRegion, parent=3feda63ca2050c61bf3633c0ebd2c0ff, daughterA=902a36716bdb460286a5cda0e9c63607, daughterB=f157ceef3558c1ef6b99a6ec43b90edd 2024-12-02T15:19:50,437 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=21, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=testExportFileSystemStateWithSplitRegion, parent=3feda63ca2050c61bf3633c0ebd2c0ff, daughterA=902a36716bdb460286a5cda0e9c63607, daughterB=f157ceef3558c1ef6b99a6ec43b90edd 2024-12-02T15:19:50,437 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=21, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=testExportFileSystemStateWithSplitRegion, parent=3feda63ca2050c61bf3633c0ebd2c0ff, daughterA=902a36716bdb460286a5cda0e9c63607, daughterB=f157ceef3558c1ef6b99a6ec43b90edd 2024-12-02T15:19:50,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=21 2024-12-02T15:19:50,446 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=22, ppid=21, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=3feda63ca2050c61bf3633c0ebd2c0ff, UNASSIGN}] 2024-12-02T15:19:50,449 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=22, ppid=21, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=3feda63ca2050c61bf3633c0ebd2c0ff, UNASSIGN 2024-12-02T15:19:50,452 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=22 updating hbase:meta row=3feda63ca2050c61bf3633c0ebd2c0ff, regionState=CLOSING, regionLocation=be0458f36726,43025,1733152773677 2024-12-02T15:19:50,456 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=22, ppid=21, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=3feda63ca2050c61bf3633c0ebd2c0ff, UNASSIGN because future has completed 2024-12-02T15:19:50,456 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: true: evictOnSplit: true: evictOnClose: false 2024-12-02T15:19:50,457 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=23, ppid=22, state=RUNNABLE, hasLock=false; CloseRegionProcedure 3feda63ca2050c61bf3633c0ebd2c0ff, server=be0458f36726,43025,1733152773677}] 2024-12-02T15:19:50,525 WARN 
[Async-Client-Retry-Timer-pool-0 {}] client.AsyncNonMetaRegionLocator(265): Failed to locate region in 'testExportFileSystemStateWithSplitRegion', row='', locateType=CURRENT org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=be0458f36726:45405 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$RpcChannelImplementation.callMethod(AbstractRpcClient.java:628) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$Stub.scan(ClientProtos.java:43851) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncClientScanner.callOpenScanner(AsyncClientScanner.java:177) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.call(AsyncSingleRequestRpcRetryingCaller.java:84) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.lambda$doCall$7(AsyncSingleRequestRpcRetryingCaller.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.lambda$addListener$0(FutureUtils.java:71) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:863) ~[?:?] at java.util.concurrent.CompletableFuture.uniWhenCompleteStage(CompletableFuture.java:887) ~[?:?] at java.util.concurrent.CompletableFuture.whenComplete(CompletableFuture.java:2325) ~[?:?] 
at org.apache.hadoop.hbase.util.FutureUtils.addListener(FutureUtils.java:64) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.doCall(AsyncSingleRequestRpcRetryingCaller.java:108) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.call(AsyncRpcRetryingCaller.java:222) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory$SingleRequestCallerBuilder.call(AsyncRpcRetryingCallerFactory.java:177) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncClientScanner.openScanner(AsyncClientScanner.java:242) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.ConnectionUtils.timelineConsistentRead(ConnectionUtils.java:442) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncClientScanner.openScanner(AsyncClientScanner.java:255) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncClientScanner.start(AsyncClientScanner.java:275) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.RawAsyncTableImpl.scan(RawAsyncTableImpl.java:617) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.RawAsyncTableImpl.scan(RawAsyncTableImpl.java:91) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncNonMetaRegionLocator.locateInMeta(AsyncNonMetaRegionLocator.java:408) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncNonMetaRegionLocator.getRegionLocationsInternal(AsyncNonMetaRegionLocator.java:516) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncNonMetaRegionLocator.getRegionLocations(AsyncNonMetaRegionLocator.java:529) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRegionLocator.lambda$getRegionLocation$7(AsyncRegionLocator.java:164) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRegionLocator.tracedLocationFuture(AsyncRegionLocator.java:106) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRegionLocator.getRegionLocation(AsyncRegionLocator.java:158) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRegionLocator.getRegionLocation(AsyncRegionLocator.java:193) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.doCall(AsyncSingleRequestRpcRetryingCaller.java:109) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.lambda$tryScheduleRetry$1(AsyncRpcRetryingCaller.java:139) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$HashedWheelTimeout.run(HashedWheelTimer.java:713) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.ImmediateExecutor.execute(ImmediateExecutor.java:34) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$HashedWheelTimeout.expire(HashedWheelTimer.java:701) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$HashedWheelBucket.expireTimeouts(HashedWheelTimer.java:788) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:501) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 34 more 2024-12-02T15:19:50,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=21 2024-12-02T15:19:50,617 INFO [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] handler.UnassignRegionHandler(122): Close 3feda63ca2050c61bf3633c0ebd2c0ff 2024-12-02T15:19:50,617 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] handler.UnassignRegionHandler(136): Unassign region: split region: true: evictCache: true 2024-12-02T15:19:50,619 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] regionserver.HRegion(1722): Closing 3feda63ca2050c61bf3633c0ebd2c0ff, disabling compactions & flushes 2024-12-02T15:19:50,619 INFO [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] regionserver.HRegion(1755): Closing region testExportFileSystemStateWithSplitRegion,,1733152784696.3feda63ca2050c61bf3633c0ebd2c0ff. 2024-12-02T15:19:50,619 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] regionserver.HRegion(1776): Time limited wait for close lock on testExportFileSystemStateWithSplitRegion,,1733152784696.3feda63ca2050c61bf3633c0ebd2c0ff. 2024-12-02T15:19:50,619 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] regionserver.HRegion(1843): Acquired close lock on testExportFileSystemStateWithSplitRegion,,1733152784696.3feda63ca2050c61bf3633c0ebd2c0ff. after waiting 0 ms 2024-12-02T15:19:50,619 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] regionserver.HRegion(1853): Updates disabled for region testExportFileSystemStateWithSplitRegion,,1733152784696.3feda63ca2050c61bf3633c0ebd2c0ff. 2024-12-02T15:19:50,652 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testExportFileSystemStateWithSplitRegion/3feda63ca2050c61bf3633c0ebd2c0ff/recovered.edits/6.seqid, newMaxSeqId=6, maxSeqId=1 2024-12-02T15:19:50,658 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-02T15:19:50,659 INFO [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] regionserver.HRegion(1973): Closed testExportFileSystemStateWithSplitRegion,,1733152784696.3feda63ca2050c61bf3633c0ebd2c0ff. 
2024-12-02T15:19:50,659 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] regionserver.HRegion(1676): Region close journal for 3feda63ca2050c61bf3633c0ebd2c0ff: Waiting for close lock at 1733152790618Running coprocessor pre-close hooks at 1733152790619 (+1 ms)Disabling compacts and flushes for region at 1733152790619Disabling writes for close at 1733152790619Writing region close event to WAL at 1733152790621 (+2 ms)Running coprocessor post-close hooks at 1733152790653 (+32 ms)Closed at 1733152790659 (+6 ms) 2024-12-02T15:19:50,668 INFO [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] handler.UnassignRegionHandler(157): Closed 3feda63ca2050c61bf3633c0ebd2c0ff 2024-12-02T15:19:50,670 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=22 updating hbase:meta row=3feda63ca2050c61bf3633c0ebd2c0ff, regionState=CLOSED 2024-12-02T15:19:50,673 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=23, ppid=22, state=RUNNABLE, hasLock=false; CloseRegionProcedure 3feda63ca2050c61bf3633c0ebd2c0ff, server=be0458f36726,43025,1733152773677 because future has completed 2024-12-02T15:19:50,683 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=23, resume processing ppid=22 2024-12-02T15:19:50,685 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=23, ppid=22, state=SUCCESS, hasLock=false; CloseRegionProcedure 3feda63ca2050c61bf3633c0ebd2c0ff, server=be0458f36726,43025,1733152773677 in 219 msec 2024-12-02T15:19:50,690 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=22, resume processing ppid=21 2024-12-02T15:19:50,690 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=22, ppid=21, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=3feda63ca2050c61bf3633c0ebd2c0ff, UNASSIGN in 237 msec 2024-12-02T15:19:50,710 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T15:19:50,715 INFO [PEWorker-3 {}] assignment.SplitTableRegionProcedure(728): pid=21 splitting 1 storefiles, region=3feda63ca2050c61bf3633c0ebd2c0ff, threads=1 2024-12-02T15:19:50,720 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(823): pid=21 splitting started for store file: hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testExportFileSystemStateWithSplitRegion/3feda63ca2050c61bf3633c0ebd2c0ff/cf/85fbe924ff0f471b8097dd8166a6b0d0_SeqId_4_ for region: 3feda63ca2050c61bf3633c0ebd2c0ff 2024-12-02T15:19:50,733 DEBUG [StoreFileSplitter-pool-0 {}] regionserver.HStoreFile(483): HFile Bloom filter type for 85fbe924ff0f471b8097dd8166a6b0d0_SeqId_4_: NONE, but ROW specified in column family configuration 2024-12-02T15:19:50,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=21 2024-12-02T15:19:50,753 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073741860_1036 (size=21) 2024-12-02T15:19:50,754 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073741860_1036 (size=21) 
2024-12-02T15:19:50,755 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073741860_1036 (size=21) 2024-12-02T15:19:50,764 DEBUG [StoreFileSplitter-pool-0 {}] regionserver.HStoreFile(483): HFile Bloom filter type for 85fbe924ff0f471b8097dd8166a6b0d0_SeqId_4_: NONE, but ROW specified in column family configuration 2024-12-02T15:19:50,781 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073741861_1037 (size=21) 2024-12-02T15:19:50,781 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073741861_1037 (size=21) 2024-12-02T15:19:50,781 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073741861_1037 (size=21) 2024-12-02T15:19:50,782 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(834): pid=21 splitting complete for store file: hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testExportFileSystemStateWithSplitRegion/3feda63ca2050c61bf3633c0ebd2c0ff/cf/85fbe924ff0f471b8097dd8166a6b0d0_SeqId_4_ for region: 3feda63ca2050c61bf3633c0ebd2c0ff 2024-12-02T15:19:50,785 DEBUG [PEWorker-3 {}] assignment.SplitTableRegionProcedure(802): pid=21 split storefiles for region 3feda63ca2050c61bf3633c0ebd2c0ff Daughter A: [hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testExportFileSystemStateWithSplitRegion/902a36716bdb460286a5cda0e9c63607/cf/85fbe924ff0f471b8097dd8166a6b0d0_SeqId_4_.3feda63ca2050c61bf3633c0ebd2c0ff] storefiles, Daughter B: [hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testExportFileSystemStateWithSplitRegion/f157ceef3558c1ef6b99a6ec43b90edd/cf/85fbe924ff0f471b8097dd8166a6b0d0_SeqId_4_.3feda63ca2050c61bf3633c0ebd2c0ff] storefiles. 
2024-12-02T15:19:50,800 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073741862_1038 (size=76) 2024-12-02T15:19:50,800 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073741862_1038 (size=76) 2024-12-02T15:19:50,800 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073741862_1038 (size=76) 2024-12-02T15:19:50,804 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T15:19:50,819 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073741863_1039 (size=76) 2024-12-02T15:19:50,819 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073741863_1039 (size=76) 2024-12-02T15:19:50,819 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073741863_1039 (size=76) 2024-12-02T15:19:50,822 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T15:19:50,832 DEBUG [PEWorker-3 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testExportFileSystemStateWithSplitRegion/902a36716bdb460286a5cda0e9c63607/recovered.edits/6.seqid, newMaxSeqId=6, maxSeqId=-1 2024-12-02T15:19:50,834 DEBUG [PEWorker-3 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testExportFileSystemStateWithSplitRegion/f157ceef3558c1ef6b99a6ec43b90edd/recovered.edits/6.seqid, newMaxSeqId=6, maxSeqId=-1 2024-12-02T15:19:50,837 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"testExportFileSystemStateWithSplitRegion,,1733152784696.3feda63ca2050c61bf3633c0ebd2c0ff.","families":{"info":[{"qualifier":"regioninfo","vlen":74,"tag":[],"timestamp":"1733152790836"},{"qualifier":"splitA","vlen":75,"tag":[],"timestamp":"1733152790836"},{"qualifier":"splitB","vlen":75,"tag":[],"timestamp":"1733152790836"}]},"ts":"1733152790836"} 2024-12-02T15:19:50,837 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"testExportFileSystemStateWithSplitRegion,,1733152790429.902a36716bdb460286a5cda0e9c63607.","families":{"info":[{"qualifier":"regioninfo","vlen":75,"tag":[],"timestamp":"1733152790836"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733152790836"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1733152790836"}]},"ts":"1733152790836"} 2024-12-02T15:19:50,837 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"testExportFileSystemStateWithSplitRegion,5,1733152790429.f157ceef3558c1ef6b99a6ec43b90edd.","families":{"info":[{"qualifier":"regioninfo","vlen":75,"tag":[],"timestamp":"1733152790836"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733152790836"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1733152790836"}]},"ts":"1733152790836"} 2024-12-02T15:19:50,852 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized 
subprocedures=[{pid=24, ppid=21, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=902a36716bdb460286a5cda0e9c63607, ASSIGN}, {pid=25, ppid=21, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=f157ceef3558c1ef6b99a6ec43b90edd, ASSIGN}] 2024-12-02T15:19:50,854 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=24, ppid=21, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=902a36716bdb460286a5cda0e9c63607, ASSIGN 2024-12-02T15:19:50,854 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=25, ppid=21, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=f157ceef3558c1ef6b99a6ec43b90edd, ASSIGN 2024-12-02T15:19:50,855 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(269): Starting pid=25, ppid=21, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=f157ceef3558c1ef6b99a6ec43b90edd, ASSIGN; state=SPLITTING_NEW, location=be0458f36726,43025,1733152773677; forceNewPlan=false, retain=false 2024-12-02T15:19:50,855 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=24, ppid=21, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=902a36716bdb460286a5cda0e9c63607, ASSIGN; state=SPLITTING_NEW, location=be0458f36726,43025,1733152773677; forceNewPlan=false, retain=false 2024-12-02T15:19:51,006 INFO [be0458f36726:34441 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
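Once the daughter regions exist, the master schedules two TransitRegionStateProcedure ASSIGN subprocedures (pid=24 and pid=25), and the balancer keeps both daughters on the region server that held the parent. Because the split call itself returns before assignment finishes, a client that needs both daughters online usually polls, much like the repeated "Checking to see if procedure is done pid=21" calls below. A rough sketch of such a wait, with a hypothetical helper name and an already-open Admin assumed:

    import java.util.List;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.RegionInfo;

    public class WaitForSplitSketch {
      // Polls until the table reports at least two regions that are neither split parents nor offline.
      static void waitForDaughters(Admin admin, TableName table) throws Exception {
        while (true) {
          List<RegionInfo> regions = admin.getRegions(table);
          long open = regions.stream().filter(r -> !r.isSplit() && !r.isOffline()).count();
          if (open >= 2) {
            return;
          }
          Thread.sleep(200);
        }
      }
    }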
2024-12-02T15:19:51,006 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=25 updating hbase:meta row=f157ceef3558c1ef6b99a6ec43b90edd, regionState=OPENING, regionLocation=be0458f36726,43025,1733152773677 2024-12-02T15:19:51,006 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=24 updating hbase:meta row=902a36716bdb460286a5cda0e9c63607, regionState=OPENING, regionLocation=be0458f36726,43025,1733152773677 2024-12-02T15:19:51,009 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=25, ppid=21, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=f157ceef3558c1ef6b99a6ec43b90edd, ASSIGN because future has completed 2024-12-02T15:19:51,009 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=26, ppid=25, state=RUNNABLE, hasLock=false; OpenRegionProcedure f157ceef3558c1ef6b99a6ec43b90edd, server=be0458f36726,43025,1733152773677}] 2024-12-02T15:19:51,012 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=24, ppid=21, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=902a36716bdb460286a5cda0e9c63607, ASSIGN because future has completed 2024-12-02T15:19:51,013 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=27, ppid=24, state=RUNNABLE, hasLock=false; OpenRegionProcedure 902a36716bdb460286a5cda0e9c63607, server=be0458f36726,43025,1733152773677}] 2024-12-02T15:19:51,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=21 2024-12-02T15:19:51,334 INFO [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] handler.AssignRegionHandler(132): Open testExportFileSystemStateWithSplitRegion,,1733152790429.902a36716bdb460286a5cda0e9c63607. 2024-12-02T15:19:51,334 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(7752): Opening region: {ENCODED => 902a36716bdb460286a5cda0e9c63607, NAME => 'testExportFileSystemStateWithSplitRegion,,1733152790429.902a36716bdb460286a5cda0e9c63607.', STARTKEY => '', ENDKEY => '5'} 2024-12-02T15:19:51,335 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(8280): Registered coprocessor service: region=testExportFileSystemStateWithSplitRegion,,1733152790429.902a36716bdb460286a5cda0e9c63607. service=AccessControlService 2024-12-02T15:19:51,335 INFO [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-02T15:19:51,335 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testExportFileSystemStateWithSplitRegion 902a36716bdb460286a5cda0e9c63607 2024-12-02T15:19:51,335 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(898): Instantiated testExportFileSystemStateWithSplitRegion,,1733152790429.902a36716bdb460286a5cda0e9c63607.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T15:19:51,336 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(7794): checking encryption for 902a36716bdb460286a5cda0e9c63607 2024-12-02T15:19:51,336 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(7797): checking classloading for 902a36716bdb460286a5cda0e9c63607 2024-12-02T15:19:51,338 INFO [StoreOpener-902a36716bdb460286a5cda0e9c63607-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 902a36716bdb460286a5cda0e9c63607 2024-12-02T15:19:51,340 INFO [StoreOpener-902a36716bdb460286a5cda0e9c63607-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 902a36716bdb460286a5cda0e9c63607 columnFamilyName cf 2024-12-02T15:19:51,341 DEBUG [StoreOpener-902a36716bdb460286a5cda0e9c63607-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T15:19:51,358 DEBUG [StoreFileOpener-902a36716bdb460286a5cda0e9c63607-cf-1 {}] regionserver.HStoreFile(483): HFile Bloom filter type for 85fbe924ff0f471b8097dd8166a6b0d0_SeqId_4_.3feda63ca2050c61bf3633c0ebd2c0ff: NONE, but ROW specified in column family configuration 2024-12-02T15:19:51,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=21 2024-12-02T15:19:52,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=21 2024-12-02T15:19:53,940 DEBUG [StoreOpener-902a36716bdb460286a5cda0e9c63607-1 {}] regionserver.StoreEngine(278): loaded 
hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testExportFileSystemStateWithSplitRegion/902a36716bdb460286a5cda0e9c63607/cf/85fbe924ff0f471b8097dd8166a6b0d0_SeqId_4_.3feda63ca2050c61bf3633c0ebd2c0ff->hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testExportFileSystemStateWithSplitRegion/3feda63ca2050c61bf3633c0ebd2c0ff/cf/85fbe924ff0f471b8097dd8166a6b0d0_SeqId_4_-bottom 2024-12-02T15:19:53,941 INFO [StoreOpener-902a36716bdb460286a5cda0e9c63607-1 {}] regionserver.HStore(327): Store=902a36716bdb460286a5cda0e9c63607/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T15:19:53,941 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(1038): replaying wal for 902a36716bdb460286a5cda0e9c63607 2024-12-02T15:19:53,944 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testExportFileSystemStateWithSplitRegion/902a36716bdb460286a5cda0e9c63607 2024-12-02T15:19:53,947 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testExportFileSystemStateWithSplitRegion/902a36716bdb460286a5cda0e9c63607 2024-12-02T15:19:53,948 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(1048): stopping wal replay for 902a36716bdb460286a5cda0e9c63607 2024-12-02T15:19:53,949 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(1060): Cleaning up temporary data for 902a36716bdb460286a5cda0e9c63607 2024-12-02T15:19:53,952 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(1093): writing seq id for 902a36716bdb460286a5cda0e9c63607 2024-12-02T15:19:53,955 INFO [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(1114): Opened 902a36716bdb460286a5cda0e9c63607; next sequenceid=7; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=71031183, jitterRate=0.058447107672691345}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-02T15:19:53,955 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 902a36716bdb460286a5cda0e9c63607 2024-12-02T15:19:53,956 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(1006): Region open journal for 902a36716bdb460286a5cda0e9c63607: Running coprocessor pre-open hook at 1733152791336Writing region info on filesystem at 1733152791336Initializing all the Stores at 1733152791338 (+2 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 
'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733152791338Cleaning up temporary data from old regions at 1733152793949 (+2611 ms)Running coprocessor post-open hooks at 1733152793955 (+6 ms)Region opened successfully at 1733152793956 (+1 ms) 2024-12-02T15:19:53,957 INFO [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegionServer(2236): Post open deploy tasks for testExportFileSystemStateWithSplitRegion,,1733152790429.902a36716bdb460286a5cda0e9c63607., pid=27, masterSystemTime=1733152791163 2024-12-02T15:19:53,958 INFO [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.CompactSplit(342): Ignoring compaction request for testExportFileSystemStateWithSplitRegion,,1733152790429.902a36716bdb460286a5cda0e9c63607.,because compaction is disabled. 2024-12-02T15:19:53,961 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegionServer(2266): Finished post open deploy task for testExportFileSystemStateWithSplitRegion,,1733152790429.902a36716bdb460286a5cda0e9c63607. 2024-12-02T15:19:53,961 INFO [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] handler.AssignRegionHandler(153): Opened testExportFileSystemStateWithSplitRegion,,1733152790429.902a36716bdb460286a5cda0e9c63607. 2024-12-02T15:19:53,962 INFO [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] handler.AssignRegionHandler(132): Open testExportFileSystemStateWithSplitRegion,5,1733152790429.f157ceef3558c1ef6b99a6ec43b90edd. 2024-12-02T15:19:53,962 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(7752): Opening region: {ENCODED => f157ceef3558c1ef6b99a6ec43b90edd, NAME => 'testExportFileSystemStateWithSplitRegion,5,1733152790429.f157ceef3558c1ef6b99a6ec43b90edd.', STARTKEY => '5', ENDKEY => ''} 2024-12-02T15:19:53,962 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(8280): Registered coprocessor service: region=testExportFileSystemStateWithSplitRegion,5,1733152790429.f157ceef3558c1ef6b99a6ec43b90edd. service=AccessControlService 2024-12-02T15:19:53,962 INFO [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
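The "Ignoring compaction request ... because compaction is disabled" message when daughter 902a36716bdb460286a5cda0e9c63607 finishes opening suggests the test table was created with compactions switched off, which is why the half-file references written by the split stay in place instead of being rewritten into new HFiles. Purely as an illustration of how a table can be declared that way (table and column family names taken from the log, everything else assumed):

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class CompactionDisabledTableSketch {
      static void createTable(Admin admin) throws Exception {
        TableDescriptor desc = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("testExportFileSystemStateWithSplitRegion"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"))
            .setCompactionEnabled(false) // region servers then skip compaction requests, as logged above
            .build();
        admin.createTable(desc);
      }
    }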
2024-12-02T15:19:53,962 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=24 updating hbase:meta row=902a36716bdb460286a5cda0e9c63607, regionState=OPEN, openSeqNum=7, regionLocation=be0458f36726,43025,1733152773677 2024-12-02T15:19:53,963 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testExportFileSystemStateWithSplitRegion f157ceef3558c1ef6b99a6ec43b90edd 2024-12-02T15:19:53,963 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(898): Instantiated testExportFileSystemStateWithSplitRegion,5,1733152790429.f157ceef3558c1ef6b99a6ec43b90edd.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T15:19:53,963 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(7794): checking encryption for f157ceef3558c1ef6b99a6ec43b90edd 2024-12-02T15:19:53,963 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(7797): checking classloading for f157ceef3558c1ef6b99a6ec43b90edd 2024-12-02T15:19:53,965 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=27, ppid=24, state=RUNNABLE, hasLock=false; OpenRegionProcedure 902a36716bdb460286a5cda0e9c63607, server=be0458f36726,43025,1733152773677 because future has completed 2024-12-02T15:19:53,966 INFO [StoreOpener-f157ceef3558c1ef6b99a6ec43b90edd-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region f157ceef3558c1ef6b99a6ec43b90edd 2024-12-02T15:19:53,967 INFO [StoreOpener-f157ceef3558c1ef6b99a6ec43b90edd-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region f157ceef3558c1ef6b99a6ec43b90edd columnFamilyName cf 2024-12-02T15:19:53,968 DEBUG [StoreOpener-f157ceef3558c1ef6b99a6ec43b90edd-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T15:19:53,971 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=27, resume processing ppid=24 2024-12-02T15:19:53,971 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=27, ppid=24, state=SUCCESS, hasLock=false; OpenRegionProcedure 902a36716bdb460286a5cda0e9c63607, server=be0458f36726,43025,1733152773677 in 2.9540 sec 2024-12-02T15:19:53,976 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=24, ppid=21, state=SUCCESS, hasLock=false; TransitRegionStateProcedure 
table=testExportFileSystemStateWithSplitRegion, region=902a36716bdb460286a5cda0e9c63607, ASSIGN in 3.1190 sec 2024-12-02T15:19:53,984 DEBUG [StoreFileOpener-f157ceef3558c1ef6b99a6ec43b90edd-cf-1 {}] regionserver.HStoreFile(483): HFile Bloom filter type for 85fbe924ff0f471b8097dd8166a6b0d0_SeqId_4_.3feda63ca2050c61bf3633c0ebd2c0ff: NONE, but ROW specified in column family configuration 2024-12-02T15:19:53,986 DEBUG [StoreOpener-f157ceef3558c1ef6b99a6ec43b90edd-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testExportFileSystemStateWithSplitRegion/f157ceef3558c1ef6b99a6ec43b90edd/cf/85fbe924ff0f471b8097dd8166a6b0d0_SeqId_4_.3feda63ca2050c61bf3633c0ebd2c0ff->hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testExportFileSystemStateWithSplitRegion/3feda63ca2050c61bf3633c0ebd2c0ff/cf/85fbe924ff0f471b8097dd8166a6b0d0_SeqId_4_-top 2024-12-02T15:19:53,986 INFO [StoreOpener-f157ceef3558c1ef6b99a6ec43b90edd-1 {}] regionserver.HStore(327): Store=f157ceef3558c1ef6b99a6ec43b90edd/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T15:19:53,986 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(1038): replaying wal for f157ceef3558c1ef6b99a6ec43b90edd 2024-12-02T15:19:53,988 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testExportFileSystemStateWithSplitRegion/f157ceef3558c1ef6b99a6ec43b90edd 2024-12-02T15:19:53,990 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testExportFileSystemStateWithSplitRegion/f157ceef3558c1ef6b99a6ec43b90edd 2024-12-02T15:19:53,991 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(1048): stopping wal replay for f157ceef3558c1ef6b99a6ec43b90edd 2024-12-02T15:19:53,991 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(1060): Cleaning up temporary data for f157ceef3558c1ef6b99a6ec43b90edd 2024-12-02T15:19:53,995 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(1093): writing seq id for f157ceef3558c1ef6b99a6ec43b90edd 2024-12-02T15:19:53,999 INFO [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(1114): Opened f157ceef3558c1ef6b99a6ec43b90edd; next sequenceid=7; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=59558825, jitterRate=-0.11250434815883636}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-02T15:19:53,999 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(1122): Running coprocessor post-open hooks for f157ceef3558c1ef6b99a6ec43b90edd 2024-12-02T15:19:53,999 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] 
regionserver.HRegion(1006): Region open journal for f157ceef3558c1ef6b99a6ec43b90edd: Running coprocessor pre-open hook at 1733152793963Writing region info on filesystem at 1733152793963Initializing all the Stores at 1733152793965 (+2 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733152793965Cleaning up temporary data from old regions at 1733152793991 (+26 ms)Running coprocessor post-open hooks at 1733152793999 (+8 ms)Region opened successfully at 1733152793999 2024-12-02T15:19:54,000 INFO [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegionServer(2236): Post open deploy tasks for testExportFileSystemStateWithSplitRegion,5,1733152790429.f157ceef3558c1ef6b99a6ec43b90edd., pid=26, masterSystemTime=1733152791163 2024-12-02T15:19:54,001 INFO [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.CompactSplit(342): Ignoring compaction request for testExportFileSystemStateWithSplitRegion,5,1733152790429.f157ceef3558c1ef6b99a6ec43b90edd.,because compaction is disabled. 2024-12-02T15:19:54,010 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegionServer(2266): Finished post open deploy task for testExportFileSystemStateWithSplitRegion,5,1733152790429.f157ceef3558c1ef6b99a6ec43b90edd. 2024-12-02T15:19:54,011 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=25 updating hbase:meta row=f157ceef3558c1ef6b99a6ec43b90edd, regionState=OPEN, openSeqNum=7, regionLocation=be0458f36726,43025,1733152773677 2024-12-02T15:19:54,011 INFO [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] handler.AssignRegionHandler(153): Opened testExportFileSystemStateWithSplitRegion,5,1733152790429.f157ceef3558c1ef6b99a6ec43b90edd. 
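At this point both daughters are open with next sequenceid=7: 902a36716bdb460286a5cda0e9c63607 covers ['', '5') and f157ceef3558c1ef6b99a6ec43b90edd covers ['5', ''), each serving its half of the parent's HFile through a reference file. A client read near the old boundary is now routed to whichever daughter owns the row. A minimal sketch of such a check, with the row key chosen here only for illustration:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Get;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class ReadAfterSplitSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Table table = conn.getTable(TableName.valueOf("testExportFileSystemStateWithSplitRegion"))) {
          // '6' sorts after the split point '5', so this Get is served by daughter f157ceef...
          Result r = table.get(new Get(Bytes.toBytes("6")));
          System.out.println("row found: " + !r.isEmpty());
        }
      }
    }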
2024-12-02T15:19:54,014 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=26, ppid=25, state=RUNNABLE, hasLock=false; OpenRegionProcedure f157ceef3558c1ef6b99a6ec43b90edd, server=be0458f36726,43025,1733152773677 because future has completed 2024-12-02T15:19:54,020 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=26, resume processing ppid=25 2024-12-02T15:19:54,020 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=26, ppid=25, state=SUCCESS, hasLock=false; OpenRegionProcedure f157ceef3558c1ef6b99a6ec43b90edd, server=be0458f36726,43025,1733152773677 in 3.0060 sec 2024-12-02T15:19:54,027 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=25, resume processing ppid=21 2024-12-02T15:19:54,027 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=25, ppid=21, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=f157ceef3558c1ef6b99a6ec43b90edd, ASSIGN in 3.1680 sec 2024-12-02T15:19:54,034 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=21, state=SUCCESS, hasLock=false; SplitTableRegionProcedure table=testExportFileSystemStateWithSplitRegion, parent=3feda63ca2050c61bf3633c0ebd2c0ff, daughterA=902a36716bdb460286a5cda0e9c63607, daughterB=f157ceef3558c1ef6b99a6ec43b90edd in 3.5970 sec 2024-12-02T15:19:54,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=21 2024-12-02T15:19:54,623 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportFileSystemStateWithSplitRegion,, stopping at row=testExportFileSystemStateWithSplitRegion ,, for max=2147483647 with caching=100 2024-12-02T15:19:54,623 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SPLIT_REGION, Table Name: default:testExportFileSystemStateWithSplitRegion completed 2024-12-02T15:19:54,629 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } 2024-12-02T15:19:54,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733152794629 (current time:1733152794629). 
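With the SPLIT_REGION operation reported complete to the client, the test immediately requests a FLUSH-type snapshot of the table ("ss=snapshot-testExportFileSystemStateWithSplitRegion ... type=FLUSH ttl=0"). That is the ordinary Admin snapshot call; a hedged sketch using the snapshot and table names from the request above, with the Admin instance assumed to be open already:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;

    public class TakeSnapshotSketch {
      static void takeSnapshot(Admin admin) throws Exception {
        // Blocks until the master's SnapshotProcedure (pid=28 below) finishes.
        admin.snapshot("snapshot-testExportFileSystemStateWithSplitRegion",
            TableName.valueOf("testExportFileSystemStateWithSplitRegion"));
      }
    }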
2024-12-02T15:19:54,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-02T15:19:54,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snapshot-testExportFileSystemStateWithSplitRegion VERSION not specified, setting to 2 2024-12-02T15:19:54,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-02T15:19:54,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@215f80fc, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T15:19:54,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] client.ClusterIdFetcher(90): Going to request be0458f36726,34441,-1 for getting cluster id 2024-12-02T15:19:54,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-02T15:19:54,630 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '8e50677f-f1c7-447a-8806-ffbc6b162076' 2024-12-02T15:19:54,631 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-02T15:19:54,631 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "8e50677f-f1c7-447a-8806-ffbc6b162076" 2024-12-02T15:19:54,631 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@38fdc394, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T15:19:54,631 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [be0458f36726,34441,-1] 2024-12-02T15:19:54,631 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-02T15:19:54,631 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T15:19:54,632 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45262, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-02T15:19:54,633 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@35e0d00d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T15:19:54,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-02T15:19:54,634 DEBUG 
[MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=be0458f36726,45405,1733152773787, seqNum=-1] 2024-12-02T15:19:54,635 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T15:19:54,636 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56152, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T15:19:54,638 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441. 2024-12-02T15:19:54,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-02T15:19:54,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T15:19:54,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T15:19:54,638 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
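The call stack above is the master validating the snapshot description: SnapshotDescriptionUtils.isSecurityAvailable opens a short-lived connection to check whether the hbase:acl table exists before table permissions are copied into the snapshot, then closes it, which is what produces the "Connection has been closed" entries. The equivalent check from client code, shown purely as an illustration with an assumed Admin instance:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;

    public class AclTableCheckSketch {
      // True when the cluster has the ACL table, i.e. the AccessController is in use.
      static boolean securityAvailable(Admin admin) throws Exception {
        return admin.tableExists(TableName.valueOf("hbase:acl"));
      }
    }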
2024-12-02T15:19:54,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@44cec9a8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T15:19:54,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] client.ClusterIdFetcher(90): Going to request be0458f36726,34441,-1 for getting cluster id 2024-12-02T15:19:54,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-02T15:19:54,640 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '8e50677f-f1c7-447a-8806-ffbc6b162076' 2024-12-02T15:19:54,641 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-02T15:19:54,641 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "8e50677f-f1c7-447a-8806-ffbc6b162076" 2024-12-02T15:19:54,641 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@558ceafd, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T15:19:54,641 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [be0458f36726,34441,-1] 2024-12-02T15:19:54,642 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-02T15:19:54,642 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T15:19:54,643 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45272, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-02T15:19:54,644 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@57d8ce26, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T15:19:54,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-02T15:19:54,646 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=be0458f36726,45405,1733152773787, seqNum=-1] 2024-12-02T15:19:54,646 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T15:19:54,647 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56156, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
2024-12-02T15:19:54,650 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testExportFileSystemStateWithSplitRegion', locateType=CURRENT is [region=hbase:acl,,1733152776657.82c95700d9ff2efea2507bd84223c4b0., hostname=be0458f36726,43025,1733152773677, seqNum=2] 2024-12-02T15:19:54,650 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T15:19:54,651 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43922, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T15:19:54,653 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441. 2024-12-02T15:19:54,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at 
org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-02T15:19:54,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T15:19:54,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T15:19:54,653 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-02T15:19:54,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] access.PermissionStorage(613): Read acl: entry[testExportFileSystemStateWithSplitRegion], kv [jenkins: RWXCA] 2024-12-02T15:19:54,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-12-02T15:19:54,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] procedure2.ProcedureExecutor(1139): Stored pid=28, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=28, snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } 2024-12-02T15:19:54,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 }, snapshot procedure id = 28 2024-12-02T15:19:54,657 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=28, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=28, snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-02T15:19:54,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=28 2024-12-02T15:19:54,658 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=28, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=28, snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-02T15:19:54,661 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=28, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=28, snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-02T15:19:54,668 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073741864_1040 (size=197) 2024-12-02T15:19:54,668 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073741864_1040 (size=197) 2024-12-02T15:19:54,669 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073741864_1040 (size=197) 2024-12-02T15:19:54,671 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=28, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; 
org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=28, snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-02T15:19:54,671 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=29, ppid=28, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 902a36716bdb460286a5cda0e9c63607}, {pid=30, ppid=28, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure f157ceef3558c1ef6b99a6ec43b90edd}] 2024-12-02T15:19:54,673 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=29, ppid=28, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 902a36716bdb460286a5cda0e9c63607 2024-12-02T15:19:54,673 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=30, ppid=28, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure f157ceef3558c1ef6b99a6ec43b90edd 2024-12-02T15:19:54,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=28 2024-12-02T15:19:54,826 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43025 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=29 2024-12-02T15:19:54,826 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43025 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=30 2024-12-02T15:19:54,826 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=29}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testExportFileSystemStateWithSplitRegion,,1733152790429.902a36716bdb460286a5cda0e9c63607. 2024-12-02T15:19:54,826 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=30}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testExportFileSystemStateWithSplitRegion,5,1733152790429.f157ceef3558c1ef6b99a6ec43b90edd. 2024-12-02T15:19:54,826 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=29}] regionserver.HRegion(2603): Flush status journal for 902a36716bdb460286a5cda0e9c63607: 2024-12-02T15:19:54,826 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=29}] regionserver.SnapshotRegionCallable(75): Snapshotting region testExportFileSystemStateWithSplitRegion,,1733152790429.902a36716bdb460286a5cda0e9c63607. for snapshot-testExportFileSystemStateWithSplitRegion completed. 2024-12-02T15:19:54,826 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=30}] regionserver.HRegion(2603): Flush status journal for f157ceef3558c1ef6b99a6ec43b90edd: 2024-12-02T15:19:54,826 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=30}] regionserver.SnapshotRegionCallable(75): Snapshotting region testExportFileSystemStateWithSplitRegion,5,1733152790429.f157ceef3558c1ef6b99a6ec43b90edd. for snapshot-testExportFileSystemStateWithSplitRegion completed. 
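Each daughter region now runs a SnapshotRegionCallable (pid=29 and pid=30) that records its region-info and a reference to its single half-file HFile in the snapshot manifest; the empty "Flush status journal" entries show there was nothing new to flush. Further below, the test exports the finished snapshot to a separate HDFS directory with the ExportSnapshot tool. That tool can be driven from the command line or programmatically; a sketch of the programmatic route follows, where the destination path is a placeholder and not the one used in this log:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
    import org.apache.hadoop.util.ToolRunner;

    public class ExportSnapshotSketch {
      public static void main(String[] args) throws Exception {
        // Copies the snapshot manifest and the referenced HFiles to the target filesystem.
        int rc = ToolRunner.run(HBaseConfiguration.create(), new ExportSnapshot(),
            new String[] {
                "-snapshot", "snapshot-testExportFileSystemStateWithSplitRegion",
                "-copy-to", "hdfs://example-namenode:8020/hbase-export" // placeholder destination
            });
        System.exit(rc);
      }
    }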
2024-12-02T15:19:54,827 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=29}] snapshot.SnapshotManifest(241): Storing 'testExportFileSystemStateWithSplitRegion,,1733152790429.902a36716bdb460286a5cda0e9c63607.' region-info for snapshot=snapshot-testExportFileSystemStateWithSplitRegion 2024-12-02T15:19:54,827 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=30}] snapshot.SnapshotManifest(241): Storing 'testExportFileSystemStateWithSplitRegion,5,1733152790429.f157ceef3558c1ef6b99a6ec43b90edd.' region-info for snapshot=snapshot-testExportFileSystemStateWithSplitRegion 2024-12-02T15:19:54,827 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=29}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-02T15:19:54,827 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=30}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-02T15:19:54,827 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=30}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testExportFileSystemStateWithSplitRegion/f157ceef3558c1ef6b99a6ec43b90edd/cf/85fbe924ff0f471b8097dd8166a6b0d0_SeqId_4_.3feda63ca2050c61bf3633c0ebd2c0ff->hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testExportFileSystemStateWithSplitRegion/3feda63ca2050c61bf3633c0ebd2c0ff/cf/85fbe924ff0f471b8097dd8166a6b0d0_SeqId_4_-top] hfiles 2024-12-02T15:19:54,827 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=29}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testExportFileSystemStateWithSplitRegion/902a36716bdb460286a5cda0e9c63607/cf/85fbe924ff0f471b8097dd8166a6b0d0_SeqId_4_.3feda63ca2050c61bf3633c0ebd2c0ff->hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testExportFileSystemStateWithSplitRegion/3feda63ca2050c61bf3633c0ebd2c0ff/cf/85fbe924ff0f471b8097dd8166a6b0d0_SeqId_4_-bottom] hfiles 2024-12-02T15:19:54,827 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=30}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testExportFileSystemStateWithSplitRegion/f157ceef3558c1ef6b99a6ec43b90edd/cf/85fbe924ff0f471b8097dd8166a6b0d0_SeqId_4_.3feda63ca2050c61bf3633c0ebd2c0ff for snapshot=snapshot-testExportFileSystemStateWithSplitRegion 2024-12-02T15:19:54,827 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=29}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testExportFileSystemStateWithSplitRegion/902a36716bdb460286a5cda0e9c63607/cf/85fbe924ff0f471b8097dd8166a6b0d0_SeqId_4_.3feda63ca2050c61bf3633c0ebd2c0ff for snapshot=snapshot-testExportFileSystemStateWithSplitRegion 2024-12-02T15:19:54,838 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073741866_1042 
(size=182) 2024-12-02T15:19:54,838 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073741865_1041 (size=182) 2024-12-02T15:19:54,838 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073741866_1042 (size=182) 2024-12-02T15:19:54,838 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073741865_1041 (size=182) 2024-12-02T15:19:54,838 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073741866_1042 (size=182) 2024-12-02T15:19:54,838 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073741865_1041 (size=182) 2024-12-02T15:19:54,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=28 2024-12-02T15:19:55,238 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=29}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testExportFileSystemStateWithSplitRegion,,1733152790429.902a36716bdb460286a5cda0e9c63607. 2024-12-02T15:19:55,238 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=30}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testExportFileSystemStateWithSplitRegion,5,1733152790429.f157ceef3558c1ef6b99a6ec43b90edd. 2024-12-02T15:19:55,238 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=29}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=29 2024-12-02T15:19:55,238 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=30}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=30 2024-12-02T15:19:55,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] master.HMaster(4169): Remote procedure done, pid=29 2024-12-02T15:19:55,239 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snapshot-testExportFileSystemStateWithSplitRegion on region 902a36716bdb460286a5cda0e9c63607 2024-12-02T15:19:55,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] master.HMaster(4169): Remote procedure done, pid=30 2024-12-02T15:19:55,239 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snapshot-testExportFileSystemStateWithSplitRegion on region f157ceef3558c1ef6b99a6ec43b90edd 2024-12-02T15:19:55,239 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=29, ppid=28, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 902a36716bdb460286a5cda0e9c63607 2024-12-02T15:19:55,239 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=30, ppid=28, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure f157ceef3558c1ef6b99a6ec43b90edd 2024-12-02T15:19:55,242 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=30, ppid=28, state=SUCCESS, hasLock=false; SnapshotRegionProcedure f157ceef3558c1ef6b99a6ec43b90edd in 569 msec 2024-12-02T15:19:55,243 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure 
pid=29, resume processing ppid=28 2024-12-02T15:19:55,243 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=28, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=28, snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-02T15:19:55,243 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=29, ppid=28, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 902a36716bdb460286a5cda0e9c63607 in 569 msec 2024-12-02T15:19:55,245 DEBUG [SplitRegionsSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 2024-12-02T15:19:55,245 DEBUG [SplitRegionsSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-12-02T15:19:55,245 DEBUG [SplitRegionsSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T15:19:55,246 DEBUG [SplitRegionsSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(366): Adding snapshot references for [hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testExportFileSystemStateWithSplitRegion/3feda63ca2050c61bf3633c0ebd2c0ff/cf/85fbe924ff0f471b8097dd8166a6b0d0_SeqId_4_] hfiles 2024-12-02T15:19:55,246 DEBUG [SplitRegionsSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (1/1): hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testExportFileSystemStateWithSplitRegion/3feda63ca2050c61bf3633c0ebd2c0ff/cf/85fbe924ff0f471b8097dd8166a6b0d0_SeqId_4_ 2024-12-02T15:19:55,253 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073741867_1043 (size=129) 2024-12-02T15:19:55,253 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073741867_1043 (size=129) 2024-12-02T15:19:55,254 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073741867_1043 (size=129) 2024-12-02T15:19:55,254 INFO [SplitRegionsSnapshotPool-pool-0 {}] procedure.SnapshotProcedure$1(378): take snapshot region={ENCODED => 3feda63ca2050c61bf3633c0ebd2c0ff, NAME => 'testExportFileSystemStateWithSplitRegion,,1733152784696.3feda63ca2050c61bf3633c0ebd2c0ff.', STARTKEY => '', ENDKEY => '', OFFLINE => true, SPLIT => true}, table=testExportFileSystemStateWithSplitRegion 2024-12-02T15:19:55,255 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=28, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=28, snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-02T15:19:55,256 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=28, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=28, snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-02T15:19:55,256 DEBUG [PEWorker-3 {}] 
snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snapshot-testExportFileSystemStateWithSplitRegion 2024-12-02T15:19:55,257 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/.hbase-snapshot/.tmp/snapshot-testExportFileSystemStateWithSplitRegion 2024-12-02T15:19:55,268 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073741868_1044 (size=891) 2024-12-02T15:19:55,268 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073741868_1044 (size=891) 2024-12-02T15:19:55,268 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073741868_1044 (size=891) 2024-12-02T15:19:55,271 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=28, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=28, snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-02T15:19:55,279 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=28, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=28, snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-02T15:19:55,279 DEBUG [PEWorker-3 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/.hbase-snapshot/.tmp/snapshot-testExportFileSystemStateWithSplitRegion to hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/.hbase-snapshot/snapshot-testExportFileSystemStateWithSplitRegion 2024-12-02T15:19:55,282 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=28, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=28, snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-02T15:19:55,282 DEBUG [PEWorker-3 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 }, snapshot procedure id = 28 2024-12-02T15:19:55,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=28 2024-12-02T15:19:55,284 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=28, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=28, snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } in 628 msec 2024-12-02T15:19:55,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=28 2024-12-02T15:19:55,794 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, 
Table Name: default:testExportFileSystemStateWithSplitRegion completed 2024-12-02T15:19:55,795 INFO [Time-limited test {}] snapshot.TestExportSnapshot(515): HDFS export destination path: hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/export-test/export-1733152795794 2024-12-02T15:19:55,795 INFO [Time-limited test {}] snapshot.TestExportSnapshot(542): tgtFsUri=hdfs://localhost:36329, tgtDir=hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/export-test/export-1733152795794, rawTgtDir=hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/export-test/export-1733152795794, srcFsUri=hdfs://localhost:36329, srcDir=hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657 2024-12-02T15:19:55,848 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1094): inputFs=hdfs://localhost:36329, inputRoot=hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657 2024-12-02T15:19:55,848 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1095): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-205930424_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/export-test/export-1733152795794, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/export-test/export-1733152795794/.hbase-snapshot/.tmp/snapshot-testExportFileSystemStateWithSplitRegion 2024-12-02T15:19:55,852 INFO [Time-limited test {}] snapshot.ExportSnapshot(1104): Verify the source snapshot's expiration status and integrity. 2024-12-02T15:19:55,859 INFO [Time-limited test {}] snapshot.ExportSnapshot(1162): Copy Snapshot Manifest from hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/.hbase-snapshot/snapshot-testExportFileSystemStateWithSplitRegion to hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/export-test/export-1733152795794/.hbase-snapshot/.tmp/snapshot-testExportFileSystemStateWithSplitRegion 2024-12-02T15:19:55,904 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073741870_1046 (size=891) 2024-12-02T15:19:55,904 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073741870_1046 (size=891) 2024-12-02T15:19:55,905 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073741870_1046 (size=891) 2024-12-02T15:19:55,913 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073741869_1045 (size=197) 2024-12-02T15:19:55,913 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073741869_1045 (size=197) 2024-12-02T15:19:55,913 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073741869_1045 (size=197) 2024-12-02T15:19:55,921 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-common/target/hbase-common-3.0.0-beta-2-SNAPSHOT.jar 2024-12-02T15:19:55,922 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-protocol-shaded/target/hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar 2024-12-02T15:19:55,922 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-client/target/hbase-client-3.0.0-beta-2-SNAPSHOT.jar 2024-12-02T15:19:56,162 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-02T15:19:56,895 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/44e720fa-d04f-d823-6564-55fab94a4e60/hadoop-11170256953024316934.jar 2024-12-02T15:19:56,896 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-hadoop-compat/target/hbase-hadoop-compat-3.0.0-beta-2-SNAPSHOT.jar 2024-12-02T15:19:56,897 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-hadoop-compat/target/hbase-hadoop-compat-3.0.0-beta-2-SNAPSHOT.jar 2024-12-02T15:19:56,963 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/44e720fa-d04f-d823-6564-55fab94a4e60/hadoop-3057464305380530447.jar 2024-12-02T15:19:56,963 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-metrics/target/hbase-metrics-3.0.0-beta-2-SNAPSHOT.jar 2024-12-02T15:19:56,964 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-metrics-api/target/hbase-metrics-api-3.0.0-beta-2-SNAPSHOT.jar 2024-12-02T15:19:56,964 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-replication/target/hbase-replication-3.0.0-beta-2-SNAPSHOT.jar 2024-12-02T15:19:56,964 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-http/target/hbase-http-3.0.0-beta-2-SNAPSHOT.jar 2024-12-02T15:19:56,965 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-procedure/target/hbase-procedure-3.0.0-beta-2-SNAPSHOT.jar 2024-12-02T15:19:56,965 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-zookeeper/target/hbase-zookeeper-3.0.0-beta-2-SNAPSHOT.jar 2024-12-02T15:19:56,965 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-02T15:19:56,966 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-02T15:19:56,966 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-02T15:19:56,967 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-02T15:19:56,967 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-02T15:19:56,967 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-02T15:19:56,968 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-02T15:19:56,968 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-02T15:19:56,968 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-02T15:19:56,969 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-02T15:19:56,969 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-02T15:19:56,972 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-02T15:19:56,972 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-02T15:19:56,973 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-02T15:19:56,973 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-02T15:19:56,973 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-02T15:19:56,974 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-02T15:19:56,974 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-02T15:19:57,148 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073741871_1047 (size=24020) 2024-12-02T15:19:57,154 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073741871_1047 (size=24020) 2024-12-02T15:19:57,155 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073741871_1047 (size=24020) 2024-12-02T15:19:57,179 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073741872_1048 (size=77755) 2024-12-02T15:19:57,184 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073741872_1048 (size=77755) 2024-12-02T15:19:57,184 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073741872_1048 (size=77755) 2024-12-02T15:19:57,202 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073741873_1049 (size=131360) 
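
The TableMapReduceUtil DEBUG entries above show the export job resolving, for each dependency class, the jar that must be shipped with the MapReduce job. A minimal sketch of how a job stages those dependencies itself, assuming a plain org.apache.hadoop.mapreduce.Job; the job name here is illustrative only, and the "tmpjars" key is the standard MapReduce property that addDependencyJars populates.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
    import org.apache.hadoop.mapreduce.Job;

    public class DependencyJarStaging {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Hypothetical job name; any MapReduce job reading HBase data is staged the same way.
        Job job = Job.getInstance(conf, "export-snapshot-example");
        // Resolves the jar containing each HBase/Hadoop dependency class and adds it to the
        // job's distributed-cache jar list, which is what produces the
        // "For class ..., using jar ..." DEBUG lines seen in the log above.
        TableMapReduceUtil.addDependencyJars(job);
        System.out.println(job.getConfiguration().get("tmpjars"));
      }
    }
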
2024-12-02T15:19:57,202 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073741873_1049 (size=131360) 2024-12-02T15:19:57,203 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073741873_1049 (size=131360) 2024-12-02T15:19:57,248 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073741874_1050 (size=111793) 2024-12-02T15:19:57,248 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073741874_1050 (size=111793) 2024-12-02T15:19:57,249 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073741874_1050 (size=111793) 2024-12-02T15:19:57,276 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073741875_1051 (size=1832290) 2024-12-02T15:19:57,277 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073741875_1051 (size=1832290) 2024-12-02T15:19:57,278 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073741875_1051 (size=1832290) 2024-12-02T15:19:57,325 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073741876_1052 (size=8360005) 2024-12-02T15:19:57,325 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073741876_1052 (size=8360005) 2024-12-02T15:19:57,325 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073741876_1052 (size=8360005) 2024-12-02T15:19:57,349 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073741877_1053 (size=503880) 2024-12-02T15:19:57,350 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073741877_1053 (size=503880) 2024-12-02T15:19:57,352 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073741877_1053 (size=503880) 2024-12-02T15:19:57,365 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073741878_1054 (size=322274) 2024-12-02T15:19:57,365 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073741878_1054 (size=322274) 2024-12-02T15:19:57,366 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073741878_1054 (size=322274) 2024-12-02T15:19:57,380 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073741879_1055 (size=20406) 2024-12-02T15:19:57,380 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073741879_1055 (size=20406) 2024-12-02T15:19:57,381 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073741879_1055 
(size=20406) 2024-12-02T15:19:57,392 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073741880_1056 (size=45609) 2024-12-02T15:19:57,392 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073741880_1056 (size=45609) 2024-12-02T15:19:57,392 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073741880_1056 (size=45609) 2024-12-02T15:19:57,403 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073741881_1057 (size=136454) 2024-12-02T15:19:57,403 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073741881_1057 (size=136454) 2024-12-02T15:19:57,405 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073741881_1057 (size=136454) 2024-12-02T15:19:57,418 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073741882_1058 (size=1597136) 2024-12-02T15:19:57,418 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073741882_1058 (size=1597136) 2024-12-02T15:19:57,418 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073741882_1058 (size=1597136) 2024-12-02T15:19:57,428 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073741883_1059 (size=30873) 2024-12-02T15:19:57,428 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073741883_1059 (size=30873) 2024-12-02T15:19:57,428 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073741883_1059 (size=30873) 2024-12-02T15:19:57,439 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073741884_1060 (size=29229) 2024-12-02T15:19:57,439 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073741884_1060 (size=29229) 2024-12-02T15:19:57,439 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073741884_1060 (size=29229) 2024-12-02T15:19:57,450 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073741885_1061 (size=903849) 2024-12-02T15:19:57,451 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073741885_1061 (size=903849) 2024-12-02T15:19:57,451 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073741885_1061 (size=903849) 2024-12-02T15:19:57,472 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073741886_1062 (size=6424745) 2024-12-02T15:19:57,472 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073741886_1062 
(size=6424745) 2024-12-02T15:19:57,472 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073741886_1062 (size=6424745) 2024-12-02T15:19:57,496 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073741887_1063 (size=5175431) 2024-12-02T15:19:57,496 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073741887_1063 (size=5175431) 2024-12-02T15:19:57,496 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073741887_1063 (size=5175431) 2024-12-02T15:19:57,508 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073741888_1064 (size=232881) 2024-12-02T15:19:57,508 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073741888_1064 (size=232881) 2024-12-02T15:19:57,509 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073741888_1064 (size=232881) 2024-12-02T15:19:57,520 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073741889_1065 (size=1323991) 2024-12-02T15:19:57,520 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073741889_1065 (size=1323991) 2024-12-02T15:19:57,521 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073741889_1065 (size=1323991) 2024-12-02T15:19:57,538 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073741890_1066 (size=4695811) 2024-12-02T15:19:57,538 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073741890_1066 (size=4695811) 2024-12-02T15:19:57,538 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073741890_1066 (size=4695811) 2024-12-02T15:19:57,554 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073741891_1067 (size=1877034) 2024-12-02T15:19:57,554 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073741891_1067 (size=1877034) 2024-12-02T15:19:57,555 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073741891_1067 (size=1877034) 2024-12-02T15:19:57,570 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073741892_1068 (size=443171) 2024-12-02T15:19:57,570 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073741892_1068 (size=443171) 2024-12-02T15:19:57,570 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073741892_1068 (size=443171) 2024-12-02T15:19:57,586 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to 
blk_1073741893_1069 (size=217555) 2024-12-02T15:19:57,586 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073741893_1069 (size=217555) 2024-12-02T15:19:57,587 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073741893_1069 (size=217555) 2024-12-02T15:19:57,603 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073741894_1070 (size=4188619) 2024-12-02T15:19:57,603 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073741894_1070 (size=4188619) 2024-12-02T15:19:57,603 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073741894_1070 (size=4188619) 2024-12-02T15:19:57,610 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073741895_1071 (size=127628) 2024-12-02T15:19:57,610 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073741895_1071 (size=127628) 2024-12-02T15:19:57,610 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073741895_1071 (size=127628) 2024-12-02T15:19:57,612 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 2024-12-02T15:19:57,616 INFO [Time-limited test {}] snapshot.ExportSnapshot(663): Loading Snapshot 'snapshot-testExportFileSystemStateWithSplitRegion' hfile list 2024-12-02T15:19:57,621 DEBUG [Time-limited test {}] snapshot.ExportSnapshot$1(689): Skip the existing file: cf/testExportFileSystemStateWithSplitRegion=3feda63ca2050c61bf3633c0ebd2c0ff-85fbe924ff0f471b8097dd8166a6b0d0_SeqId_4_. 2024-12-02T15:19:57,621 DEBUG [Time-limited test {}] snapshot.ExportSnapshot$1(689): Skip the existing file: cf/testExportFileSystemStateWithSplitRegion=3feda63ca2050c61bf3633c0ebd2c0ff-85fbe924ff0f471b8097dd8166a6b0d0_SeqId_4_. 
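
The entries above come from org.apache.hadoop.hbase.snapshot.ExportSnapshot loading the snapshot's hfile list and, just below, computing the copy split sizes. A minimal sketch of how a client might take such a snapshot and then export it programmatically, assuming the current Admin and ExportSnapshot Tool interfaces; the table name, snapshot name and destination URI are placeholders, and the --snapshot/--copy-to flags should be checked against the ExportSnapshot usage text for the HBase version in use.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
    import org.apache.hadoop.util.ToolRunner;

    public class SnapshotExportExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // 1. Take a snapshot of the table (placeholder names).
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          admin.snapshot("snapshot-example", TableName.valueOf("exampleTable"));
        }
        // 2. Export the snapshot to another filesystem; this launches the MapReduce copy
        //    job whose setup and block writes appear throughout the log above.
        int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
            "--snapshot", "snapshot-example",
            "--copy-to", "hdfs://backup-cluster:8020/hbase"   // placeholder destination
        });
        System.exit(rc);
      }
    }
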
2024-12-02T15:19:57,622 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=0 size=305.6 M 2024-12-02T15:19:57,637 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073741896_1072 (size=244) 2024-12-02T15:19:57,637 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073741896_1072 (size=244) 2024-12-02T15:19:57,637 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073741896_1072 (size=244) 2024-12-02T15:19:57,644 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073741897_1073 (size=17) 2024-12-02T15:19:57,644 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073741897_1073 (size=17) 2024-12-02T15:19:57,644 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073741897_1073 (size=17) 2024-12-02T15:19:57,835 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073741898_1074 (size=304137) 2024-12-02T15:19:57,836 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073741898_1074 (size=304137) 2024-12-02T15:19:57,836 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073741898_1074 (size=304137) 2024-12-02T15:19:58,489 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-02T15:19:58,489 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-02T15:19:59,028 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733152780087_0001_000001 (auth:SIMPLE) from 127.0.0.1:60290 2024-12-02T15:20:01,774 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
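
The AbstractLeafQueue warnings above refer to the CapacityScheduler's limit on how much of a queue's capacity ApplicationMasters may consume. A hedged sketch of the knob involved, assuming the mini MR cluster used by this test reads it from the supplied Configuration; on a real cluster the property belongs in capacity-scheduler.xml, and the value 0.5 is purely illustrative.

    import org.apache.hadoop.conf.Configuration;

    public class AmResourcePercentExample {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Fraction of queue capacity that ApplicationMasters may use; when it is too low
        // to fit even one AM container, the scheduler logs the
        // "maximum-am-resource-percent is insufficient" warnings seen above.
        conf.setFloat("yarn.scheduler.capacity.maximum-am-resource-percent", 0.5f);
        System.out.println(conf.get("yarn.scheduler.capacity.maximum-am-resource-percent"));
      }
    }
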
2024-12-02T15:20:03,093 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testExportFileSystemStateWithSplitRegion 2024-12-02T15:20:03,093 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testExportFileSystemStateWithSplitRegion Metrics about Tables on a single HBase RegionServer 2024-12-02T15:20:12,630 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733152780087_0001_000001 (auth:SIMPLE) from 127.0.0.1:57566 2024-12-02T15:20:12,985 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073741899_1075 (size=349835) 2024-12-02T15:20:12,986 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073741899_1075 (size=349835) 2024-12-02T15:20:12,987 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073741899_1075 (size=349835) 2024-12-02T15:20:14,899 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733152780087_0001_000001 (auth:SIMPLE) from 127.0.0.1:47750 2024-12-02T15:20:17,949 INFO [master/be0458f36726:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-12-02T15:20:17,949 INFO [master/be0458f36726:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-12-02T15:20:27,578 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region fb3d55cc4b4cfd19ef91f04ca0bd2813, had cached 0 bytes from a total of 8188 2024-12-02T15:20:27,591 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 00df70f91cea80c6193fbc6c5ad9d3b9, had cached 0 bytes from a total of 5424 2024-12-02T15:20:31,775 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-12-02T15:20:36,335 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 902a36716bdb460286a5cda0e9c63607, had cached 0 bytes from a total of 320414712 2024-12-02T15:20:38,028 DEBUG [master/be0458f36726:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 00df70f91cea80c6193fbc6c5ad9d3b9 changed from -1.0 to 0.0, refreshing cache 2024-12-02T15:20:38,028 DEBUG [master/be0458f36726:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 82c95700d9ff2efea2507bd84223c4b0 changed from -1.0 to 0.0, refreshing cache 2024-12-02T15:20:38,028 DEBUG [master/be0458f36726:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region fb3d55cc4b4cfd19ef91f04ca0bd2813 changed from -1.0 to 0.0, refreshing cache 2024-12-02T15:20:38,963 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region f157ceef3558c1ef6b99a6ec43b90edd, had cached 0 bytes from a total of 320414712 2024-12-02T15:20:55,363 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073741900_1076 (size=134217728) 2024-12-02T15:20:55,369 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073741900_1076 (size=134217728) 2024-12-02T15:20:55,370 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073741900_1076 (size=134217728) 2024-12-02T15:21:01,775 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-12-02T15:21:06,532 WARN [DataXceiver for client DFSClient_attempt_1733152780087_0001_m_000000_0_-665005550_1 at /127.0.0.1:52138 [Receiving block BP-1550669498-172.17.0.2-1733152767991:blk_1073741901_1077] {}] datanode.BlockReceiver(767): Slow BlockReceiver write data to disk cost: 4386ms (threshold=300ms), volume=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/44e720fa-d04f-d823-6564-55fab94a4e60/cluster_91cb4ebe-02bf-547b-91b0-e80db118e0cc/data/data3/, blockId=1073741901, seqno=2553 2024-12-02T15:21:12,579 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region fb3d55cc4b4cfd19ef91f04ca0bd2813, had cached 0 bytes from a total of 8188 2024-12-02T15:21:12,591 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 00df70f91cea80c6193fbc6c5ad9d3b9, had cached 0 bytes from a total of 5424 2024-12-02T15:21:21,336 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 902a36716bdb460286a5cda0e9c63607, had cached 0 bytes from a total of 320414712 2024-12-02T15:21:23,963 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region f157ceef3558c1ef6b99a6ec43b90edd, had cached 0 bytes from a total of 320414712 2024-12-02T15:21:28,775 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073741901_1077 (size=134217728) 2024-12-02T15:21:28,775 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073741901_1077 (size=134217728) 2024-12-02T15:21:28,776 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073741901_1077 (size=134217728) 2024-12-02T15:21:31,775 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-12-02T15:21:37,959 WARN [DataXceiver for client DFSClient_attempt_1733152780087_0001_m_000000_0_-665005550_1 at /127.0.0.1:43860 [Receiving block BP-1550669498-172.17.0.2-1733152767991:blk_1073741902_1078] {}] datanode.BlockReceiver(767): Slow BlockReceiver write data to disk cost: 3842ms (threshold=300ms), volume=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/44e720fa-d04f-d823-6564-55fab94a4e60/cluster_91cb4ebe-02bf-547b-91b0-e80db118e0cc/data/data6/, blockId=1073741902, seqno=4551 2024-12-02T15:21:37,959 WARN [DataXceiver for client DFSClient_attempt_1733152780087_0001_m_000000_0_-665005550_1 at /127.0.0.1:45722 [Receiving block BP-1550669498-172.17.0.2-1733152767991:blk_1073741902_1078] {}] datanode.BlockReceiver(767): Slow BlockReceiver write data to disk cost: 3842ms (threshold=300ms), volume=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/44e720fa-d04f-d823-6564-55fab94a4e60/cluster_91cb4ebe-02bf-547b-91b0-e80db118e0cc/data/data2/, blockId=1073741902, seqno=4551 2024-12-02T15:21:37,959 WARN [DataXceiver for client DFSClient_attempt_1733152780087_0001_m_000000_0_-665005550_1 at /127.0.0.1:45268 [Receiving block BP-1550669498-172.17.0.2-1733152767991:blk_1073741902_1078] {}] datanode.BlockReceiver(767): Slow BlockReceiver write data to disk cost: 3842ms (threshold=300ms), volume=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/44e720fa-d04f-d823-6564-55fab94a4e60/cluster_91cb4ebe-02bf-547b-91b0-e80db118e0cc/data/data4/, blockId=1073741902, seqno=4551 2024-12-02T15:21:42,655 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073741902_1078 (size=51979256) 2024-12-02T15:21:42,655 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073741902_1078 (size=51979256) 2024-12-02T15:21:42,655 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073741902_1078 (size=51979256) 2024-12-02T15:21:42,732 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073741903_1079 (size=17520) 2024-12-02T15:21:42,733 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073741903_1079 (size=17520) 2024-12-02T15:21:42,733 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073741903_1079 (size=17520) 2024-12-02T15:21:42,771 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073741904_1080 (size=482) 2024-12-02T15:21:42,771 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073741904_1080 (size=482) 2024-12-02T15:21:42,771 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073741904_1080 (size=482) 2024-12-02T15:21:42,785 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_126270/yarn-6841854118/MiniMRCluster_126270-localDir-nm-1_2/usercache/jenkins/appcache/application_1733152780087_0001/container_1733152780087_0001_01_000002/launch_container.sh] 2024-12-02T15:21:42,786 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_126270/yarn-6841854118/MiniMRCluster_126270-localDir-nm-1_2/usercache/jenkins/appcache/application_1733152780087_0001/container_1733152780087_0001_01_000002/container_tokens] 2024-12-02T15:21:42,786 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_126270/yarn-6841854118/MiniMRCluster_126270-localDir-nm-1_2/usercache/jenkins/appcache/application_1733152780087_0001/container_1733152780087_0001_01_000002/sysfs] 2024-12-02T15:21:42,796 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073741905_1081 (size=17520) 2024-12-02T15:21:42,796 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073741905_1081 (size=17520) 2024-12-02T15:21:42,796 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073741905_1081 (size=17520) 2024-12-02T15:21:42,814 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073741906_1082 (size=349835) 2024-12-02T15:21:42,815 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073741906_1082 (size=349835) 2024-12-02T15:21:42,815 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073741906_1082 (size=349835) 2024-12-02T15:21:42,828 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733152780087_0001_000001 (auth:SIMPLE) from 127.0.0.1:43832 2024-12-02T15:21:44,247 INFO [Time-limited test {}] snapshot.ExportSnapshot(1219): Finalize the Snapshot Export 2024-12-02T15:21:44,249 INFO [Time-limited test {}] snapshot.ExportSnapshot(1230): Verify the exported snapshot's expiration status and integrity. 
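
After the copy job finishes, ExportSnapshot finalizes the export and the test checks that both the source and the exported snapshot directories contain the expected .snapshotinfo and data.manifest files, as listed in the entries that follow. A minimal sketch of that kind of verification, assuming plain Hadoop FileSystem APIs; the export root and snapshot name below are placeholders.

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class ExportedSnapshotCheck {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Placeholder export destination; in the log this is .../export-test/export-<timestamp>.
        Path exportRoot = new Path("hdfs://backup-cluster:8020/hbase-export");
        FileSystem fs = FileSystem.get(URI.create(exportRoot.toString()), conf);
        Path snapDir = new Path(exportRoot, ".hbase-snapshot/snapshot-example");
        // The exported snapshot should carry its descriptor and manifest, i.e. the two
        // files the test lists: .snapshotinfo and data.manifest.
        for (FileStatus st : fs.listStatus(snapDir)) {
          System.out.println(st.getPath());
        }
        if (!fs.exists(new Path(snapDir, ".snapshotinfo"))
            || !fs.exists(new Path(snapDir, "data.manifest"))) {
          throw new IllegalStateException("exported snapshot is missing its metadata files");
        }
      }
    }
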
2024-12-02T15:21:44,257 INFO [Time-limited test {}] snapshot.ExportSnapshot(1236): Export Completed: snapshot-testExportFileSystemStateWithSplitRegion 2024-12-02T15:21:44,257 INFO [Time-limited test {}] snapshot.TestExportSnapshot(409): Exported snapshot 2024-12-02T15:21:44,257 INFO [Time-limited test {}] snapshot.TestExportSnapshot(420): Verified filesystem state 2024-12-02T15:21:44,258 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-205930424_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/.hbase-snapshot/snapshot-testExportFileSystemStateWithSplitRegion at hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/.hbase-snapshot/snapshot-testExportFileSystemStateWithSplitRegion 2024-12-02T15:21:44,258 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/.hbase-snapshot/snapshot-testExportFileSystemStateWithSplitRegion/.snapshotinfo 2024-12-02T15:21:44,258 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/.hbase-snapshot/snapshot-testExportFileSystemStateWithSplitRegion/data.manifest 2024-12-02T15:21:44,258 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-205930424_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/export-test/export-1733152795794/.hbase-snapshot/snapshot-testExportFileSystemStateWithSplitRegion at hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/export-test/export-1733152795794/.hbase-snapshot/snapshot-testExportFileSystemStateWithSplitRegion 2024-12-02T15:21:44,259 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/export-test/export-1733152795794/.hbase-snapshot/snapshot-testExportFileSystemStateWithSplitRegion/.snapshotinfo 2024-12-02T15:21:44,259 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/export-test/export-1733152795794/.hbase-snapshot/snapshot-testExportFileSystemStateWithSplitRegion/data.manifest 2024-12-02T15:21:44,273 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testExportFileSystemStateWithSplitRegion 2024-12-02T15:21:44,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] procedure2.ProcedureExecutor(1139): Stored pid=31, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testExportFileSystemStateWithSplitRegion 2024-12-02T15:21:44,301 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportFileSystemStateWithSplitRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733152904300"}]},"ts":"1733152904300"} 2024-12-02T15:21:44,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=31 2024-12-02T15:21:44,303 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportFileSystemStateWithSplitRegion, state=DISABLING in hbase:meta 
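
The DisableTableProcedure stored above (pid=31) and the DeleteTableProcedure that follows further down are the master-side halves of the usual client sequence for dropping a table once the export has been verified. A minimal sketch of that client side, assuming the standard Admin API; the table name matches this test, but the connection setup is illustrative.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class DropTableExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        TableName table = TableName.valueOf("testExportFileSystemStateWithSplitRegion");
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // A table must be disabled before it can be deleted; these two calls drive the
          // DisableTableProcedure / DeleteTableProcedure chains visible in the log.
          if (!admin.isTableDisabled(table)) {
            admin.disableTable(table);
          }
          admin.deleteTable(table);
        }
      }
    }
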
2024-12-02T15:21:44,303 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(284): Set testExportFileSystemStateWithSplitRegion to state=DISABLING 2024-12-02T15:21:44,305 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=32, ppid=31, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testExportFileSystemStateWithSplitRegion}] 2024-12-02T15:21:44,311 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=33, ppid=32, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=902a36716bdb460286a5cda0e9c63607, UNASSIGN}, {pid=34, ppid=32, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=f157ceef3558c1ef6b99a6ec43b90edd, UNASSIGN}] 2024-12-02T15:21:44,312 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=33, ppid=32, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=902a36716bdb460286a5cda0e9c63607, UNASSIGN 2024-12-02T15:21:44,312 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=34, ppid=32, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=f157ceef3558c1ef6b99a6ec43b90edd, UNASSIGN 2024-12-02T15:21:44,313 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=33 updating hbase:meta row=902a36716bdb460286a5cda0e9c63607, regionState=CLOSING, regionLocation=be0458f36726,43025,1733152773677 2024-12-02T15:21:44,313 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=34 updating hbase:meta row=f157ceef3558c1ef6b99a6ec43b90edd, regionState=CLOSING, regionLocation=be0458f36726,43025,1733152773677 2024-12-02T15:21:44,316 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=33, ppid=32, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=902a36716bdb460286a5cda0e9c63607, UNASSIGN because future has completed 2024-12-02T15:21:44,317 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-02T15:21:44,317 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=35, ppid=33, state=RUNNABLE, hasLock=false; CloseRegionProcedure 902a36716bdb460286a5cda0e9c63607, server=be0458f36726,43025,1733152773677}] 2024-12-02T15:21:44,317 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=34, ppid=32, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=f157ceef3558c1ef6b99a6ec43b90edd, UNASSIGN because future has completed 2024-12-02T15:21:44,318 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-02T15:21:44,318 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=36, ppid=34, state=RUNNABLE, hasLock=false; CloseRegionProcedure f157ceef3558c1ef6b99a6ec43b90edd, server=be0458f36726,43025,1733152773677}] 
2024-12-02T15:21:44,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=31 2024-12-02T15:21:44,471 INFO [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] handler.UnassignRegionHandler(122): Close 902a36716bdb460286a5cda0e9c63607 2024-12-02T15:21:44,471 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-02T15:21:44,471 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1722): Closing 902a36716bdb460286a5cda0e9c63607, disabling compactions & flushes 2024-12-02T15:21:44,471 INFO [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1755): Closing region testExportFileSystemStateWithSplitRegion,,1733152790429.902a36716bdb460286a5cda0e9c63607. 2024-12-02T15:21:44,471 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1776): Time limited wait for close lock on testExportFileSystemStateWithSplitRegion,,1733152790429.902a36716bdb460286a5cda0e9c63607. 2024-12-02T15:21:44,471 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1843): Acquired close lock on testExportFileSystemStateWithSplitRegion,,1733152790429.902a36716bdb460286a5cda0e9c63607. after waiting 0 ms 2024-12-02T15:21:44,471 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1853): Updates disabled for region testExportFileSystemStateWithSplitRegion,,1733152790429.902a36716bdb460286a5cda0e9c63607. 2024-12-02T15:21:44,476 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testExportFileSystemStateWithSplitRegion/902a36716bdb460286a5cda0e9c63607/recovered.edits/10.seqid, newMaxSeqId=10, maxSeqId=6 2024-12-02T15:21:44,477 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-02T15:21:44,477 INFO [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1973): Closed testExportFileSystemStateWithSplitRegion,,1733152790429.902a36716bdb460286a5cda0e9c63607. 
2024-12-02T15:21:44,477 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1676): Region close journal for 902a36716bdb460286a5cda0e9c63607: Waiting for close lock at 1733152904471Running coprocessor pre-close hooks at 1733152904471Disabling compacts and flushes for region at 1733152904471Disabling writes for close at 1733152904471Writing region close event to WAL at 1733152904472 (+1 ms)Running coprocessor post-close hooks at 1733152904477 (+5 ms)Closed at 1733152904477 2024-12-02T15:21:44,479 INFO [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] handler.UnassignRegionHandler(157): Closed 902a36716bdb460286a5cda0e9c63607 2024-12-02T15:21:44,479 INFO [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=36}] handler.UnassignRegionHandler(122): Close f157ceef3558c1ef6b99a6ec43b90edd 2024-12-02T15:21:44,480 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=36}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-02T15:21:44,480 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=36}] regionserver.HRegion(1722): Closing f157ceef3558c1ef6b99a6ec43b90edd, disabling compactions & flushes 2024-12-02T15:21:44,480 INFO [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=36}] regionserver.HRegion(1755): Closing region testExportFileSystemStateWithSplitRegion,5,1733152790429.f157ceef3558c1ef6b99a6ec43b90edd. 2024-12-02T15:21:44,480 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=36}] regionserver.HRegion(1776): Time limited wait for close lock on testExportFileSystemStateWithSplitRegion,5,1733152790429.f157ceef3558c1ef6b99a6ec43b90edd. 2024-12-02T15:21:44,480 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=36}] regionserver.HRegion(1843): Acquired close lock on testExportFileSystemStateWithSplitRegion,5,1733152790429.f157ceef3558c1ef6b99a6ec43b90edd. after waiting 0 ms 2024-12-02T15:21:44,480 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=36}] regionserver.HRegion(1853): Updates disabled for region testExportFileSystemStateWithSplitRegion,5,1733152790429.f157ceef3558c1ef6b99a6ec43b90edd. 
2024-12-02T15:21:44,481 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=33 updating hbase:meta row=902a36716bdb460286a5cda0e9c63607, regionState=CLOSED 2024-12-02T15:21:44,483 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=35, ppid=33, state=RUNNABLE, hasLock=false; CloseRegionProcedure 902a36716bdb460286a5cda0e9c63607, server=be0458f36726,43025,1733152773677 because future has completed 2024-12-02T15:21:44,489 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=36}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testExportFileSystemStateWithSplitRegion/f157ceef3558c1ef6b99a6ec43b90edd/recovered.edits/10.seqid, newMaxSeqId=10, maxSeqId=6 2024-12-02T15:21:44,490 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=35, resume processing ppid=33 2024-12-02T15:21:44,490 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=35, ppid=33, state=SUCCESS, hasLock=false; CloseRegionProcedure 902a36716bdb460286a5cda0e9c63607, server=be0458f36726,43025,1733152773677 in 170 msec 2024-12-02T15:21:44,490 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=36}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-02T15:21:44,490 INFO [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=36}] regionserver.HRegion(1973): Closed testExportFileSystemStateWithSplitRegion,5,1733152790429.f157ceef3558c1ef6b99a6ec43b90edd. 2024-12-02T15:21:44,490 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=36}] regionserver.HRegion(1676): Region close journal for f157ceef3558c1ef6b99a6ec43b90edd: Waiting for close lock at 1733152904480Running coprocessor pre-close hooks at 1733152904480Disabling compacts and flushes for region at 1733152904480Disabling writes for close at 1733152904480Writing region close event to WAL at 1733152904483 (+3 ms)Running coprocessor post-close hooks at 1733152904490 (+7 ms)Closed at 1733152904490 2024-12-02T15:21:44,491 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=33, ppid=32, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=902a36716bdb460286a5cda0e9c63607, UNASSIGN in 180 msec 2024-12-02T15:21:44,492 INFO [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=36}] handler.UnassignRegionHandler(157): Closed f157ceef3558c1ef6b99a6ec43b90edd 2024-12-02T15:21:44,493 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=34 updating hbase:meta row=f157ceef3558c1ef6b99a6ec43b90edd, regionState=CLOSED 2024-12-02T15:21:44,495 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=36, ppid=34, state=RUNNABLE, hasLock=false; CloseRegionProcedure f157ceef3558c1ef6b99a6ec43b90edd, server=be0458f36726,43025,1733152773677 because future has completed 2024-12-02T15:21:44,498 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=36, resume processing ppid=34 2024-12-02T15:21:44,498 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=36, ppid=34, state=SUCCESS, hasLock=false; CloseRegionProcedure f157ceef3558c1ef6b99a6ec43b90edd, 
server=be0458f36726,43025,1733152773677 in 178 msec 2024-12-02T15:21:44,500 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=34, resume processing ppid=32 2024-12-02T15:21:44,500 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=34, ppid=32, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=f157ceef3558c1ef6b99a6ec43b90edd, UNASSIGN in 188 msec 2024-12-02T15:21:44,502 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=32, resume processing ppid=31 2024-12-02T15:21:44,502 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=32, ppid=31, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testExportFileSystemStateWithSplitRegion in 196 msec 2024-12-02T15:21:44,503 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportFileSystemStateWithSplitRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733152904503"}]},"ts":"1733152904503"} 2024-12-02T15:21:44,505 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportFileSystemStateWithSplitRegion, state=DISABLED in hbase:meta 2024-12-02T15:21:44,505 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(296): Set testExportFileSystemStateWithSplitRegion to state=DISABLED 2024-12-02T15:21:44,508 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=31, state=SUCCESS, hasLock=false; DisableTableProcedure table=testExportFileSystemStateWithSplitRegion in 230 msec 2024-12-02T15:21:44,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=31 2024-12-02T15:21:44,613 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testExportFileSystemStateWithSplitRegion completed 2024-12-02T15:21:44,618 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testExportFileSystemStateWithSplitRegion 2024-12-02T15:21:44,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] procedure2.ProcedureExecutor(1139): Stored pid=37, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testExportFileSystemStateWithSplitRegion 2024-12-02T15:21:44,625 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=37, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testExportFileSystemStateWithSplitRegion 2024-12-02T15:21:44,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] access.PermissionStorage(261): Removing permissions of removed table testExportFileSystemStateWithSplitRegion 2024-12-02T15:21:44,627 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=37, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testExportFileSystemStateWithSplitRegion 2024-12-02T15:21:44,630 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43025 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testExportFileSystemStateWithSplitRegion 2024-12-02T15:21:44,636 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(131): ARCHIVING 
hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testExportFileSystemStateWithSplitRegion/3feda63ca2050c61bf3633c0ebd2c0ff 2024-12-02T15:21:44,636 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testExportFileSystemStateWithSplitRegion/902a36716bdb460286a5cda0e9c63607 2024-12-02T15:21:44,636 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testExportFileSystemStateWithSplitRegion/f157ceef3558c1ef6b99a6ec43b90edd 2024-12-02T15:21:44,640 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testExportFileSystemStateWithSplitRegion/f157ceef3558c1ef6b99a6ec43b90edd/cf, FileablePath, hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testExportFileSystemStateWithSplitRegion/f157ceef3558c1ef6b99a6ec43b90edd/recovered.edits] 2024-12-02T15:21:44,640 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testExportFileSystemStateWithSplitRegion/902a36716bdb460286a5cda0e9c63607/cf, FileablePath, hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testExportFileSystemStateWithSplitRegion/902a36716bdb460286a5cda0e9c63607/recovered.edits] 2024-12-02T15:21:44,640 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testExportFileSystemStateWithSplitRegion/3feda63ca2050c61bf3633c0ebd2c0ff/cf, FileablePath, hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testExportFileSystemStateWithSplitRegion/3feda63ca2050c61bf3633c0ebd2c0ff/recovered.edits] 2024-12-02T15:21:44,648 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testExportFileSystemStateWithSplitRegion/f157ceef3558c1ef6b99a6ec43b90edd/cf/85fbe924ff0f471b8097dd8166a6b0d0_SeqId_4_.3feda63ca2050c61bf3633c0ebd2c0ff to hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/archive/data/default/testExportFileSystemStateWithSplitRegion/f157ceef3558c1ef6b99a6ec43b90edd/cf/85fbe924ff0f471b8097dd8166a6b0d0_SeqId_4_.3feda63ca2050c61bf3633c0ebd2c0ff 2024-12-02T15:21:44,648 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testExportFileSystemStateWithSplitRegion/3feda63ca2050c61bf3633c0ebd2c0ff/cf/85fbe924ff0f471b8097dd8166a6b0d0_SeqId_4_ to hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/archive/data/default/testExportFileSystemStateWithSplitRegion/3feda63ca2050c61bf3633c0ebd2c0ff/cf/85fbe924ff0f471b8097dd8166a6b0d0_SeqId_4_ 2024-12-02T15:21:44,648 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testExportFileSystemStateWithSplitRegion/902a36716bdb460286a5cda0e9c63607/cf/85fbe924ff0f471b8097dd8166a6b0d0_SeqId_4_.3feda63ca2050c61bf3633c0ebd2c0ff to hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/archive/data/default/testExportFileSystemStateWithSplitRegion/902a36716bdb460286a5cda0e9c63607/cf/85fbe924ff0f471b8097dd8166a6b0d0_SeqId_4_.3feda63ca2050c61bf3633c0ebd2c0ff 2024-12-02T15:21:44,652 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testExportFileSystemStateWithSplitRegion/f157ceef3558c1ef6b99a6ec43b90edd/recovered.edits/10.seqid to hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/archive/data/default/testExportFileSystemStateWithSplitRegion/f157ceef3558c1ef6b99a6ec43b90edd/recovered.edits/10.seqid 2024-12-02T15:21:44,652 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testExportFileSystemStateWithSplitRegion/3feda63ca2050c61bf3633c0ebd2c0ff/recovered.edits/6.seqid to hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/archive/data/default/testExportFileSystemStateWithSplitRegion/3feda63ca2050c61bf3633c0ebd2c0ff/recovered.edits/6.seqid 2024-12-02T15:21:44,652 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testExportFileSystemStateWithSplitRegion/902a36716bdb460286a5cda0e9c63607/recovered.edits/10.seqid to hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/archive/data/default/testExportFileSystemStateWithSplitRegion/902a36716bdb460286a5cda0e9c63607/recovered.edits/10.seqid 2024-12-02T15:21:44,652 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testExportFileSystemStateWithSplitRegion/f157ceef3558c1ef6b99a6ec43b90edd 2024-12-02T15:21:44,652 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testExportFileSystemStateWithSplitRegion/3feda63ca2050c61bf3633c0ebd2c0ff 2024-12-02T15:21:44,652 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testExportFileSystemStateWithSplitRegion/902a36716bdb460286a5cda0e9c63607 2024-12-02T15:21:44,652 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(313): Archived testExportFileSystemStateWithSplitRegion regions 2024-12-02T15:21:44,655 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=37, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testExportFileSystemStateWithSplitRegion 2024-12-02T15:21:44,659 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45405 {}] util.ReflectedFunctionCache(97): Populated cache for org.apache.hadoop.hbase.filter.KeyOnlyFilter in 0ms 2024-12-02T15:21:44,664 WARN [PEWorker-1 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 3 rows of testExportFileSystemStateWithSplitRegion from 
hbase:meta 2024-12-02T15:21:44,669 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(407): Removing 'testExportFileSystemStateWithSplitRegion' descriptor. 2024-12-02T15:21:44,671 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=37, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testExportFileSystemStateWithSplitRegion 2024-12-02T15:21:44,671 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(397): Removing 'testExportFileSystemStateWithSplitRegion' from region states. 2024-12-02T15:21:44,671 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testExportFileSystemStateWithSplitRegion,,1733152784696.3feda63ca2050c61bf3633c0ebd2c0ff.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733152904671"}]},"ts":"9223372036854775807"} 2024-12-02T15:21:44,672 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testExportFileSystemStateWithSplitRegion,,1733152790429.902a36716bdb460286a5cda0e9c63607.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733152904671"}]},"ts":"9223372036854775807"} 2024-12-02T15:21:44,672 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testExportFileSystemStateWithSplitRegion,5,1733152790429.f157ceef3558c1ef6b99a6ec43b90edd.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733152904671"}]},"ts":"9223372036854775807"} 2024-12-02T15:21:44,675 INFO [PEWorker-1 {}] assignment.RegionStateStore(562): Deleted 3 regions from META 2024-12-02T15:21:44,675 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => 3feda63ca2050c61bf3633c0ebd2c0ff, NAME => 'testExportFileSystemStateWithSplitRegion,,1733152784696.3feda63ca2050c61bf3633c0ebd2c0ff.', STARTKEY => '', ENDKEY => ''}, {ENCODED => 902a36716bdb460286a5cda0e9c63607, NAME => 'testExportFileSystemStateWithSplitRegion,,1733152790429.902a36716bdb460286a5cda0e9c63607.', STARTKEY => '', ENDKEY => '5'}, {ENCODED => f157ceef3558c1ef6b99a6ec43b90edd, NAME => 'testExportFileSystemStateWithSplitRegion,5,1733152790429.f157ceef3558c1ef6b99a6ec43b90edd.', STARTKEY => '5', ENDKEY => ''}] 2024-12-02T15:21:44,676 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(401): Marking 'testExportFileSystemStateWithSplitRegion' as deleted. 
2024-12-02T15:21:44,676 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testExportFileSystemStateWithSplitRegion","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733152904676"}]},"ts":"9223372036854775807"} 2024-12-02T15:21:44,679 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(867): Deleted table testExportFileSystemStateWithSplitRegion state from META 2024-12-02T15:21:44,680 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(133): Finished pid=37, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testExportFileSystemStateWithSplitRegion 2024-12-02T15:21:44,681 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=37, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testExportFileSystemStateWithSplitRegion in 61 msec 2024-12-02T15:21:44,721 DEBUG [pool-69-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45405-0x10197ce54a90002, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportFileSystemStateWithSplitRegion 2024-12-02T15:21:44,721 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43025-0x10197ce54a90001, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportFileSystemStateWithSplitRegion 2024-12-02T15:21:44,721 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35155-0x10197ce54a90003, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportFileSystemStateWithSplitRegion 2024-12-02T15:21:44,721 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34441-0x10197ce54a90000, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportFileSystemStateWithSplitRegion 2024-12-02T15:21:44,729 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43025-0x10197ce54a90001, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T15:21:44,729 DEBUG [pool-69-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45405-0x10197ce54a90002, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T15:21:44,729 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35155-0x10197ce54a90003, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T15:21:44,730 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportFileSystemStateWithSplitRegion with data null 2024-12-02T15:21:44,730 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportFileSystemStateWithSplitRegion with data null 2024-12-02T15:21:44,730 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportFileSystemStateWithSplitRegion with data null 2024-12-02T15:21:44,730 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportFileSystemStateWithSplitRegion with data null 
2024-12-02T15:21:44,729 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34441-0x10197ce54a90000, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T15:21:44,730 INFO [zk-permission-watcher-pool-0 {}] access.AuthManager(136): Skipping permission cache refresh because writable data is empty 2024-12-02T15:21:44,730 INFO [zk-permission-watcher-pool-0 {}] access.AuthManager(136): Skipping permission cache refresh because writable data is empty 2024-12-02T15:21:44,730 INFO [zk-permission-watcher-pool-0 {}] access.AuthManager(136): Skipping permission cache refresh because writable data is empty 2024-12-02T15:21:44,730 INFO [zk-permission-watcher-pool-0 {}] access.AuthManager(136): Skipping permission cache refresh because writable data is empty 2024-12-02T15:21:44,731 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-12-02T15:21:44,731 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-12-02T15:21:44,731 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-12-02T15:21:44,731 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-12-02T15:21:44,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=37 2024-12-02T15:21:44,732 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testExportFileSystemStateWithSplitRegion 2024-12-02T15:21:44,732 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testExportFileSystemStateWithSplitRegion completed 2024-12-02T15:21:44,733 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testExportFileSystemStateWithSplitRegion 2024-12-02T15:21:44,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] procedure2.ProcedureExecutor(1139): Stored pid=38, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion 2024-12-02T15:21:44,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=38 2024-12-02T15:21:44,736 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put 
{"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSplitRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733152904736"}]},"ts":"1733152904736"} 2024-12-02T15:21:44,738 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithSplitRegion, state=DISABLING in hbase:meta 2024-12-02T15:21:44,738 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(284): Set testtb-testExportFileSystemStateWithSplitRegion to state=DISABLING 2024-12-02T15:21:44,739 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=39, ppid=38, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithSplitRegion}] 2024-12-02T15:21:44,741 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=40, ppid=39, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=00df70f91cea80c6193fbc6c5ad9d3b9, UNASSIGN}, {pid=41, ppid=39, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=fb3d55cc4b4cfd19ef91f04ca0bd2813, UNASSIGN}] 2024-12-02T15:21:44,742 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=41, ppid=39, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=fb3d55cc4b4cfd19ef91f04ca0bd2813, UNASSIGN 2024-12-02T15:21:44,742 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=40, ppid=39, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=00df70f91cea80c6193fbc6c5ad9d3b9, UNASSIGN 2024-12-02T15:21:44,743 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=41 updating hbase:meta row=fb3d55cc4b4cfd19ef91f04ca0bd2813, regionState=CLOSING, regionLocation=be0458f36726,45405,1733152773787 2024-12-02T15:21:44,743 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=40 updating hbase:meta row=00df70f91cea80c6193fbc6c5ad9d3b9, regionState=CLOSING, regionLocation=be0458f36726,35155,1733152773848 2024-12-02T15:21:44,745 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=41, ppid=39, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=fb3d55cc4b4cfd19ef91f04ca0bd2813, UNASSIGN because future has completed 2024-12-02T15:21:44,746 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-02T15:21:44,746 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=42, ppid=41, state=RUNNABLE, hasLock=false; CloseRegionProcedure fb3d55cc4b4cfd19ef91f04ca0bd2813, server=be0458f36726,45405,1733152773787}] 2024-12-02T15:21:44,746 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=40, ppid=39, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=00df70f91cea80c6193fbc6c5ad9d3b9, UNASSIGN because future has completed 
2024-12-02T15:21:44,747 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-02T15:21:44,747 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=43, ppid=40, state=RUNNABLE, hasLock=false; CloseRegionProcedure 00df70f91cea80c6193fbc6c5ad9d3b9, server=be0458f36726,35155,1733152773848}] 2024-12-02T15:21:44,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=38 2024-12-02T15:21:44,900 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47699, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-02T15:21:44,901 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40257, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-02T15:21:44,901 INFO [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] handler.UnassignRegionHandler(122): Close fb3d55cc4b4cfd19ef91f04ca0bd2813 2024-12-02T15:21:44,901 INFO [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] handler.UnassignRegionHandler(122): Close 00df70f91cea80c6193fbc6c5ad9d3b9 2024-12-02T15:21:44,901 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-02T15:21:44,901 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-02T15:21:44,901 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.HRegion(1722): Closing 00df70f91cea80c6193fbc6c5ad9d3b9, disabling compactions & flushes 2024-12-02T15:21:44,901 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] regionserver.HRegion(1722): Closing fb3d55cc4b4cfd19ef91f04ca0bd2813, disabling compactions & flushes 2024-12-02T15:21:44,902 INFO [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithSplitRegion,,1733152782066.00df70f91cea80c6193fbc6c5ad9d3b9. 2024-12-02T15:21:44,902 INFO [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithSplitRegion,1,1733152782066.fb3d55cc4b4cfd19ef91f04ca0bd2813. 2024-12-02T15:21:44,902 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithSplitRegion,,1733152782066.00df70f91cea80c6193fbc6c5ad9d3b9. 2024-12-02T15:21:44,902 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithSplitRegion,1,1733152782066.fb3d55cc4b4cfd19ef91f04ca0bd2813. 
2024-12-02T15:21:44,902 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithSplitRegion,,1733152782066.00df70f91cea80c6193fbc6c5ad9d3b9. after waiting 0 ms 2024-12-02T15:21:44,902 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithSplitRegion,,1733152782066.00df70f91cea80c6193fbc6c5ad9d3b9. 2024-12-02T15:21:44,902 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithSplitRegion,1,1733152782066.fb3d55cc4b4cfd19ef91f04ca0bd2813. after waiting 0 ms 2024-12-02T15:21:44,902 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithSplitRegion,1,1733152782066.fb3d55cc4b4cfd19ef91f04ca0bd2813. 2024-12-02T15:21:44,907 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportFileSystemStateWithSplitRegion/00df70f91cea80c6193fbc6c5ad9d3b9/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-02T15:21:44,907 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportFileSystemStateWithSplitRegion/fb3d55cc4b4cfd19ef91f04ca0bd2813/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-02T15:21:44,908 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-02T15:21:44,908 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-02T15:21:44,908 INFO [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithSplitRegion,1,1733152782066.fb3d55cc4b4cfd19ef91f04ca0bd2813. 2024-12-02T15:21:44,908 INFO [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithSplitRegion,,1733152782066.00df70f91cea80c6193fbc6c5ad9d3b9. 
2024-12-02T15:21:44,908 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] regionserver.HRegion(1676): Region close journal for fb3d55cc4b4cfd19ef91f04ca0bd2813: Waiting for close lock at 1733152904901Running coprocessor pre-close hooks at 1733152904901Disabling compacts and flushes for region at 1733152904901Disabling writes for close at 1733152904902 (+1 ms)Writing region close event to WAL at 1733152904903 (+1 ms)Running coprocessor post-close hooks at 1733152904908 (+5 ms)Closed at 1733152904908 2024-12-02T15:21:44,908 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.HRegion(1676): Region close journal for 00df70f91cea80c6193fbc6c5ad9d3b9: Waiting for close lock at 1733152904901Running coprocessor pre-close hooks at 1733152904901Disabling compacts and flushes for region at 1733152904901Disabling writes for close at 1733152904902 (+1 ms)Writing region close event to WAL at 1733152904903 (+1 ms)Running coprocessor post-close hooks at 1733152904908 (+5 ms)Closed at 1733152904908 2024-12-02T15:21:44,910 INFO [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] handler.UnassignRegionHandler(157): Closed fb3d55cc4b4cfd19ef91f04ca0bd2813 2024-12-02T15:21:44,911 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=41 updating hbase:meta row=fb3d55cc4b4cfd19ef91f04ca0bd2813, regionState=CLOSED 2024-12-02T15:21:44,912 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=40 updating hbase:meta row=00df70f91cea80c6193fbc6c5ad9d3b9, regionState=CLOSED 2024-12-02T15:21:44,912 INFO [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] handler.UnassignRegionHandler(157): Closed 00df70f91cea80c6193fbc6c5ad9d3b9 2024-12-02T15:21:44,913 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=42, ppid=41, state=RUNNABLE, hasLock=false; CloseRegionProcedure fb3d55cc4b4cfd19ef91f04ca0bd2813, server=be0458f36726,45405,1733152773787 because future has completed 2024-12-02T15:21:44,917 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=42, resume processing ppid=41 2024-12-02T15:21:44,918 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=42, ppid=41, state=SUCCESS, hasLock=false; CloseRegionProcedure fb3d55cc4b4cfd19ef91f04ca0bd2813, server=be0458f36726,45405,1733152773787 in 169 msec 2024-12-02T15:21:44,919 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=43, ppid=40, state=RUNNABLE, hasLock=false; CloseRegionProcedure 00df70f91cea80c6193fbc6c5ad9d3b9, server=be0458f36726,35155,1733152773848 because future has completed 2024-12-02T15:21:44,923 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=43, resume processing ppid=40 2024-12-02T15:21:44,923 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=41, ppid=39, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=fb3d55cc4b4cfd19ef91f04ca0bd2813, UNASSIGN in 177 msec 2024-12-02T15:21:44,925 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=43, ppid=40, state=SUCCESS, hasLock=false; CloseRegionProcedure 00df70f91cea80c6193fbc6c5ad9d3b9, server=be0458f36726,35155,1733152773848 in 173 msec 2024-12-02T15:21:44,927 INFO [PEWorker-2 {}] 
procedure2.ProcedureExecutor(2017): Finished subprocedure pid=40, resume processing ppid=39 2024-12-02T15:21:44,927 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=40, ppid=39, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=00df70f91cea80c6193fbc6c5ad9d3b9, UNASSIGN in 183 msec 2024-12-02T15:21:44,930 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=39, resume processing ppid=38 2024-12-02T15:21:44,930 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=39, ppid=38, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithSplitRegion in 189 msec 2024-12-02T15:21:44,932 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSplitRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733152904931"}]},"ts":"1733152904931"} 2024-12-02T15:21:44,934 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithSplitRegion, state=DISABLED in hbase:meta 2024-12-02T15:21:44,934 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(296): Set testtb-testExportFileSystemStateWithSplitRegion to state=DISABLED 2024-12-02T15:21:44,936 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=38, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion in 202 msec 2024-12-02T15:21:45,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=38 2024-12-02T15:21:45,053 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testExportFileSystemStateWithSplitRegion completed 2024-12-02T15:21:45,053 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testExportFileSystemStateWithSplitRegion 2024-12-02T15:21:45,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] procedure2.ProcedureExecutor(1139): Stored pid=44, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion 2024-12-02T15:21:45,055 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=44, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion 2024-12-02T15:21:45,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportFileSystemStateWithSplitRegion 2024-12-02T15:21:45,056 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=44, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion 2024-12-02T15:21:45,059 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43025 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportFileSystemStateWithSplitRegion 2024-12-02T15:21:45,061 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(131): ARCHIVING 
hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportFileSystemStateWithSplitRegion/00df70f91cea80c6193fbc6c5ad9d3b9 2024-12-02T15:21:45,061 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportFileSystemStateWithSplitRegion/fb3d55cc4b4cfd19ef91f04ca0bd2813 2024-12-02T15:21:45,065 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportFileSystemStateWithSplitRegion/00df70f91cea80c6193fbc6c5ad9d3b9/cf, FileablePath, hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportFileSystemStateWithSplitRegion/00df70f91cea80c6193fbc6c5ad9d3b9/recovered.edits] 2024-12-02T15:21:45,065 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportFileSystemStateWithSplitRegion/fb3d55cc4b4cfd19ef91f04ca0bd2813/cf, FileablePath, hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportFileSystemStateWithSplitRegion/fb3d55cc4b4cfd19ef91f04ca0bd2813/recovered.edits] 2024-12-02T15:21:45,071 DEBUG [pool-69-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45405-0x10197ce54a90002, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSplitRegion 2024-12-02T15:21:45,071 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35155-0x10197ce54a90003, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSplitRegion 2024-12-02T15:21:45,071 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34441-0x10197ce54a90000, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSplitRegion 2024-12-02T15:21:45,071 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43025-0x10197ce54a90001, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSplitRegion 2024-12-02T15:21:45,072 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF 2024-12-02T15:21:45,072 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF 2024-12-02T15:21:45,072 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF 2024-12-02T15:21:45,072 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF 2024-12-02T15:21:45,076 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportFileSystemStateWithSplitRegion/00df70f91cea80c6193fbc6c5ad9d3b9/cf/17d168090aeb4ea89199d70ca4ff63bb to hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/archive/data/default/testtb-testExportFileSystemStateWithSplitRegion/00df70f91cea80c6193fbc6c5ad9d3b9/cf/17d168090aeb4ea89199d70ca4ff63bb 2024-12-02T15:21:45,076 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportFileSystemStateWithSplitRegion/fb3d55cc4b4cfd19ef91f04ca0bd2813/cf/ea734200e4034710856ca8335dabc7e6 to hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/archive/data/default/testtb-testExportFileSystemStateWithSplitRegion/fb3d55cc4b4cfd19ef91f04ca0bd2813/cf/ea734200e4034710856ca8335dabc7e6 2024-12-02T15:21:45,079 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportFileSystemStateWithSplitRegion/fb3d55cc4b4cfd19ef91f04ca0bd2813/recovered.edits/9.seqid to hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/archive/data/default/testtb-testExportFileSystemStateWithSplitRegion/fb3d55cc4b4cfd19ef91f04ca0bd2813/recovered.edits/9.seqid 2024-12-02T15:21:45,079 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35155-0x10197ce54a90003, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSplitRegion 2024-12-02T15:21:45,079 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34441-0x10197ce54a90000, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSplitRegion 2024-12-02T15:21:45,080 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34441-0x10197ce54a90000, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T15:21:45,080 DEBUG [pool-69-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45405-0x10197ce54a90002, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSplitRegion 2024-12-02T15:21:45,080 DEBUG [pool-69-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45405-0x10197ce54a90002, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T15:21:45,080 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportFileSystemStateWithSplitRegion/00df70f91cea80c6193fbc6c5ad9d3b9/recovered.edits/9.seqid to hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/archive/data/default/testtb-testExportFileSystemStateWithSplitRegion/00df70f91cea80c6193fbc6c5ad9d3b9/recovered.edits/9.seqid 2024-12-02T15:21:45,081 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43025-0x10197ce54a90001, 
quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSplitRegion 2024-12-02T15:21:45,081 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43025-0x10197ce54a90001, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T15:21:45,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=44 2024-12-02T15:21:45,081 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportFileSystemStateWithSplitRegion/fb3d55cc4b4cfd19ef91f04ca0bd2813 2024-12-02T15:21:45,081 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportFileSystemStateWithSplitRegion/00df70f91cea80c6193fbc6c5ad9d3b9 2024-12-02T15:21:45,081 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportFileSystemStateWithSplitRegion regions 2024-12-02T15:21:45,081 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35155-0x10197ce54a90003, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T15:21:45,084 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=44, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion 2024-12-02T15:21:45,087 WARN [PEWorker-5 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testExportFileSystemStateWithSplitRegion from hbase:meta 2024-12-02T15:21:45,094 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testExportFileSystemStateWithSplitRegion' descriptor. 2024-12-02T15:21:45,095 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=44, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion 2024-12-02T15:21:45,095 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testExportFileSystemStateWithSplitRegion' from region states. 
2024-12-02T15:21:45,096 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSplitRegion,,1733152782066.00df70f91cea80c6193fbc6c5ad9d3b9.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733152905096"}]},"ts":"9223372036854775807"} 2024-12-02T15:21:45,096 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSplitRegion,1,1733152782066.fb3d55cc4b4cfd19ef91f04ca0bd2813.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733152905096"}]},"ts":"9223372036854775807"} 2024-12-02T15:21:45,099 INFO [PEWorker-5 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-12-02T15:21:45,099 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => 00df70f91cea80c6193fbc6c5ad9d3b9, NAME => 'testtb-testExportFileSystemStateWithSplitRegion,,1733152782066.00df70f91cea80c6193fbc6c5ad9d3b9.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => fb3d55cc4b4cfd19ef91f04ca0bd2813, NAME => 'testtb-testExportFileSystemStateWithSplitRegion,1,1733152782066.fb3d55cc4b4cfd19ef91f04ca0bd2813.', STARTKEY => '1', ENDKEY => ''}] 2024-12-02T15:21:45,099 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testExportFileSystemStateWithSplitRegion' as deleted. 2024-12-02T15:21:45,100 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSplitRegion","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733152905100"}]},"ts":"9223372036854775807"} 2024-12-02T15:21:45,103 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportFileSystemStateWithSplitRegion state from META 2024-12-02T15:21:45,105 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(133): Finished pid=44, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion 2024-12-02T15:21:45,106 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=44, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion in 51 msec 2024-12-02T15:21:45,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=44 2024-12-02T15:21:45,183 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportFileSystemStateWithSplitRegion 2024-12-02T15:21:45,183 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportFileSystemStateWithSplitRegion completed 2024-12-02T15:21:45,216 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportFileSystemStateWithSplitRegion" type: DISABLED 2024-12-02T15:21:45,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testExportFileSystemStateWithSplitRegion 2024-12-02T15:21:45,222 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snapshot-testExportFileSystemStateWithSplitRegion" type: DISABLED 2024-12-02T15:21:45,225 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] snapshot.SnapshotManager(381): Deleting snapshot: snapshot-testExportFileSystemStateWithSplitRegion 2024-12-02T15:21:45,227 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportFileSystemStateWithSplitRegion" type: DISABLED 2024-12-02T15:21:45,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportFileSystemStateWithSplitRegion 2024-12-02T15:21:45,265 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestSecureExportSnapshot#testExportFileSystemStateWithSplitRegion Thread=769 (was 725) Potentially hanging thread: zk-permission-watcher-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_COMPACTED_FILES_DISCHARGER-regionserver/be0458f36726:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DeletionService #1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: process reaper (pid 111075) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: zk-permission-watcher-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-6 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: zk-permission-watcher-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-255930616_1 at /127.0.0.1:49166 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Thread-1375 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-5 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_CLOSE_REGION-regionserver/be0458f36726:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-255930616_1 at /127.0.0.1:58404 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: zk-permission-watcher-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RSProcedureDispatcher-pool-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_CLOSE_REGION-regionserver/be0458f36726:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-14 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_COMPACTED_FILES_DISCHARGER-regionserver/be0458f36726:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-10 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-11 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RSProcedureDispatcher-pool-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Container metrics unregistration java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DeletionService #0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: region-location-3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-205930424_22 at /127.0.0.1:42482 [Waiting for operation #6] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-13 
java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-10 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-205930424_22 at /127.0.0.1:48676 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-13 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-9 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-11 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_CLOSE_REGION-regionserver/be0458f36726:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RSProcedureDispatcher-pool-3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:40015 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-9 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-0 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ContainersLauncher #1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (834363022) connection to localhost/127.0.0.1:40015 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-15 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_OPEN_REGION-regionserver/be0458f36726:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-7 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-4 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DeletionService #3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DeletionService #2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_COMPACTED_FILES_DISCHARGER-regionserver/be0458f36726:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-8 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ContainersLauncher #0 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.read1(BufferedReader.java:213) java.base@17.0.11/java.io.BufferedReader.read(BufferedReader.java:287) app//org.apache.hadoop.util.Shell$ShellCommandExecutor.parseExecResult(Shell.java:1295) app//org.apache.hadoop.util.Shell.runCommand(Shell.java:1054) app//org.apache.hadoop.util.Shell.run(Shell.java:959) app//org.apache.hadoop.util.Shell$ShellCommandExecutor.execute(Shell.java:1282) app//org.apache.hadoop.yarn.server.nodemanager.DefaultContainerExecutor.launchContainer(DefaultContainerExecutor.java:349) app//org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch.launchContainer(ContainerLaunch.java:600) app//org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch.call(ContainerLaunch.java:388) app//org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch.call(ContainerLaunch.java:105) java.base@17.0.11/java.util.concurrent.FutureTask.run(FutureTask.java:264) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: region-location-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-8 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_OPEN_REGION-regionserver/be0458f36726:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-205930424_22 at /127.0.0.1:37158 [Waiting for operation #6] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-12 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-12 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_OPEN_REGION-regionserver/be0458f36726:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=797 (was 782) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=553 (was 231) - SystemLoadAverage LEAK? -, ProcessCount=17 (was 11) - ProcessCount LEAK? 
-, AvailableMemoryMB=563 (was 3680) 2024-12-02T15:21:45,266 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=769 is superior to 500 2024-12-02T15:21:45,285 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestSecureExportSnapshot#testExportWithTargetName Thread=769, OpenFileDescriptor=797, MaxFileDescriptor=1048576, SystemLoadAverage=553, ProcessCount=17, AvailableMemoryMB=561 2024-12-02T15:21:45,285 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=769 is superior to 500 2024-12-02T15:21:45,287 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testExportWithTargetName', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-02T15:21:45,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] procedure2.ProcedureExecutor(1139): Stored pid=45, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportWithTargetName 2024-12-02T15:21:45,290 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=45, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportWithTargetName execute state=CREATE_TABLE_PRE_OPERATION 2024-12-02T15:21:45,290 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T15:21:45,290 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportWithTargetName" procId is: 45 2024-12-02T15:21:45,291 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=45, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportWithTargetName execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-02T15:21:45,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=45 2024-12-02T15:21:45,304 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073741907_1083 (size=406) 2024-12-02T15:21:45,304 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073741907_1083 (size=406) 2024-12-02T15:21:45,305 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073741907_1083 (size=406) 2024-12-02T15:21:45,307 INFO [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => d6bc8d8e61dabca25846dd8d50e1ea8f, NAME => 'testtb-testExportWithTargetName,,1733152905287.d6bc8d8e61dabca25846dd8d50e1ea8f.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportWithTargetName', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657 2024-12-02T15:21:45,307 INFO [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => af7631d267d518d88b1ac02b61113820, NAME => 'testtb-testExportWithTargetName,1,1733152905287.af7631d267d518d88b1ac02b61113820.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportWithTargetName', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657 2024-12-02T15:21:45,322 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073741908_1084 (size=67) 2024-12-02T15:21:45,322 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073741908_1084 (size=67) 2024-12-02T15:21:45,322 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073741908_1084 (size=67) 2024-12-02T15:21:45,323 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportWithTargetName,1,1733152905287.af7631d267d518d88b1ac02b61113820.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T15:21:45,323 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1722): Closing af7631d267d518d88b1ac02b61113820, disabling compactions & flushes 2024-12-02T15:21:45,323 INFO [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportWithTargetName,1,1733152905287.af7631d267d518d88b1ac02b61113820. 2024-12-02T15:21:45,323 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithTargetName,1,1733152905287.af7631d267d518d88b1ac02b61113820. 2024-12-02T15:21:45,323 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithTargetName,1,1733152905287.af7631d267d518d88b1ac02b61113820. after waiting 0 ms 2024-12-02T15:21:45,323 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithTargetName,1,1733152905287.af7631d267d518d88b1ac02b61113820. 2024-12-02T15:21:45,323 INFO [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportWithTargetName,1,1733152905287.af7631d267d518d88b1ac02b61113820. 
2024-12-02T15:21:45,323 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1676): Region close journal for af7631d267d518d88b1ac02b61113820: Waiting for close lock at 1733152905323Disabling compacts and flushes for region at 1733152905323Disabling writes for close at 1733152905323Writing region close event to WAL at 1733152905323Closed at 1733152905323 2024-12-02T15:21:45,325 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073741909_1085 (size=67) 2024-12-02T15:21:45,325 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073741909_1085 (size=67) 2024-12-02T15:21:45,326 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073741909_1085 (size=67) 2024-12-02T15:21:45,362 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportWithTargetName,,1733152905287.d6bc8d8e61dabca25846dd8d50e1ea8f.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T15:21:45,362 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1722): Closing d6bc8d8e61dabca25846dd8d50e1ea8f, disabling compactions & flushes 2024-12-02T15:21:45,362 INFO [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportWithTargetName,,1733152905287.d6bc8d8e61dabca25846dd8d50e1ea8f. 2024-12-02T15:21:45,362 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithTargetName,,1733152905287.d6bc8d8e61dabca25846dd8d50e1ea8f. 2024-12-02T15:21:45,362 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithTargetName,,1733152905287.d6bc8d8e61dabca25846dd8d50e1ea8f. after waiting 0 ms 2024-12-02T15:21:45,362 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithTargetName,,1733152905287.d6bc8d8e61dabca25846dd8d50e1ea8f. 2024-12-02T15:21:45,362 INFO [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportWithTargetName,,1733152905287.d6bc8d8e61dabca25846dd8d50e1ea8f. 
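For reference, the create request logged above ('testtb-testExportWithTargetName' with a single 'cf' family, REGION_REPLICATION => '1', and regions split at '1') maps onto the public HBase Admin API roughly as follows. This is a minimal sketch, not the test's own code: the Configuration/connection setup and the specific builder calls shown are assumptions for illustration.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateExportTable {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();  // assumes hbase-site.xml is on the classpath
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName tn = TableName.valueOf("testtb-testExportWithTargetName");
      // One 'cf' family, one version, 64 KB blocks: mirrors the descriptor printed by HMaster above.
      ColumnFamilyDescriptorBuilder cf =
          ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
              .setMaxVersions(1)
              .setBlocksize(65536);
      TableDescriptorBuilder td = TableDescriptorBuilder.newBuilder(tn)
          .setRegionReplication(1)
          .setColumnFamily(cf.build());
      // Pre-split at '1' so the table gets the two regions ('' -> '1', '1' -> '') seen in the log.
      admin.createTable(td.build(), new byte[][] { Bytes.toBytes("1") });
    }
  }
}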
2024-12-02T15:21:45,362 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1676): Region close journal for d6bc8d8e61dabca25846dd8d50e1ea8f: Waiting for close lock at 1733152905362Disabling compacts and flushes for region at 1733152905362Disabling writes for close at 1733152905362Writing region close event to WAL at 1733152905362Closed at 1733152905362 2024-12-02T15:21:45,364 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=45, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportWithTargetName execute state=CREATE_TABLE_ADD_TO_META 2024-12-02T15:21:45,365 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportWithTargetName,1,1733152905287.af7631d267d518d88b1ac02b61113820.","families":{"info":[{"qualifier":"regioninfo","vlen":66,"tag":[],"timestamp":"1733152905364"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733152905364"}]},"ts":"1733152905364"} 2024-12-02T15:21:45,365 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportWithTargetName,,1733152905287.d6bc8d8e61dabca25846dd8d50e1ea8f.","families":{"info":[{"qualifier":"regioninfo","vlen":66,"tag":[],"timestamp":"1733152905364"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733152905364"}]},"ts":"1733152905364"} 2024-12-02T15:21:45,368 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-12-02T15:21:45,369 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=45, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportWithTargetName execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-02T15:21:45,369 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithTargetName","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733152905369"}]},"ts":"1733152905369"} 2024-12-02T15:21:45,371 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithTargetName, state=ENABLING in hbase:meta 2024-12-02T15:21:45,372 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(204): Hosts are {be0458f36726=0} racks are {/default-rack=0} 2024-12-02T15:21:45,373 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-02T15:21:45,373 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-02T15:21:45,373 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-02T15:21:45,373 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-02T15:21:45,373 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-02T15:21:45,373 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-02T15:21:45,373 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-02T15:21:45,373 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-02T15:21:45,373 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-02T15:21:45,374 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-02T15:21:45,374 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=46, ppid=45, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; 
TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=d6bc8d8e61dabca25846dd8d50e1ea8f, ASSIGN}, {pid=47, ppid=45, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=af7631d267d518d88b1ac02b61113820, ASSIGN}] 2024-12-02T15:21:45,375 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=46, ppid=45, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=d6bc8d8e61dabca25846dd8d50e1ea8f, ASSIGN 2024-12-02T15:21:45,375 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=47, ppid=45, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=af7631d267d518d88b1ac02b61113820, ASSIGN 2024-12-02T15:21:45,377 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(269): Starting pid=46, ppid=45, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=d6bc8d8e61dabca25846dd8d50e1ea8f, ASSIGN; state=OFFLINE, location=be0458f36726,43025,1733152773677; forceNewPlan=false, retain=false 2024-12-02T15:21:45,377 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=47, ppid=45, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=af7631d267d518d88b1ac02b61113820, ASSIGN; state=OFFLINE, location=be0458f36726,45405,1733152773787; forceNewPlan=false, retain=false 2024-12-02T15:21:45,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=45 2024-12-02T15:21:45,527 INFO [be0458f36726:34441 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
2024-12-02T15:21:45,528 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=46 updating hbase:meta row=d6bc8d8e61dabca25846dd8d50e1ea8f, regionState=OPENING, regionLocation=be0458f36726,43025,1733152773677 2024-12-02T15:21:45,528 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=47 updating hbase:meta row=af7631d267d518d88b1ac02b61113820, regionState=OPENING, regionLocation=be0458f36726,45405,1733152773787 2024-12-02T15:21:45,530 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=47, ppid=45, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=af7631d267d518d88b1ac02b61113820, ASSIGN because future has completed 2024-12-02T15:21:45,530 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=48, ppid=47, state=RUNNABLE, hasLock=false; OpenRegionProcedure af7631d267d518d88b1ac02b61113820, server=be0458f36726,45405,1733152773787}] 2024-12-02T15:21:45,532 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=46, ppid=45, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=d6bc8d8e61dabca25846dd8d50e1ea8f, ASSIGN because future has completed 2024-12-02T15:21:45,535 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=49, ppid=46, state=RUNNABLE, hasLock=false; OpenRegionProcedure d6bc8d8e61dabca25846dd8d50e1ea8f, server=be0458f36726,43025,1733152773677}] 2024-12-02T15:21:45,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=45 2024-12-02T15:21:45,690 INFO [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] handler.AssignRegionHandler(132): Open testtb-testExportWithTargetName,,1733152905287.d6bc8d8e61dabca25846dd8d50e1ea8f. 2024-12-02T15:21:45,690 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(7752): Opening region: {ENCODED => d6bc8d8e61dabca25846dd8d50e1ea8f, NAME => 'testtb-testExportWithTargetName,,1733152905287.d6bc8d8e61dabca25846dd8d50e1ea8f.', STARTKEY => '', ENDKEY => '1'} 2024-12-02T15:21:45,691 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportWithTargetName,,1733152905287.d6bc8d8e61dabca25846dd8d50e1ea8f. service=AccessControlService 2024-12-02T15:21:45,691 INFO [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-02T15:21:45,691 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithTargetName d6bc8d8e61dabca25846dd8d50e1ea8f 2024-12-02T15:21:45,691 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(898): Instantiated testtb-testExportWithTargetName,,1733152905287.d6bc8d8e61dabca25846dd8d50e1ea8f.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T15:21:45,691 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(7794): checking encryption for d6bc8d8e61dabca25846dd8d50e1ea8f 2024-12-02T15:21:45,691 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(7797): checking classloading for d6bc8d8e61dabca25846dd8d50e1ea8f 2024-12-02T15:21:45,693 INFO [StoreOpener-d6bc8d8e61dabca25846dd8d50e1ea8f-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region d6bc8d8e61dabca25846dd8d50e1ea8f 2024-12-02T15:21:45,694 INFO [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] handler.AssignRegionHandler(132): Open testtb-testExportWithTargetName,1,1733152905287.af7631d267d518d88b1ac02b61113820. 2024-12-02T15:21:45,695 INFO [StoreOpener-d6bc8d8e61dabca25846dd8d50e1ea8f-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region d6bc8d8e61dabca25846dd8d50e1ea8f columnFamilyName cf 2024-12-02T15:21:45,695 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(7752): Opening region: {ENCODED => af7631d267d518d88b1ac02b61113820, NAME => 'testtb-testExportWithTargetName,1,1733152905287.af7631d267d518d88b1ac02b61113820.', STARTKEY => '1', ENDKEY => ''} 2024-12-02T15:21:45,695 DEBUG [StoreOpener-d6bc8d8e61dabca25846dd8d50e1ea8f-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T15:21:45,695 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportWithTargetName,1,1733152905287.af7631d267d518d88b1ac02b61113820. service=AccessControlService 2024-12-02T15:21:45,695 INFO [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-02T15:21:45,695 INFO [StoreOpener-d6bc8d8e61dabca25846dd8d50e1ea8f-1 {}] regionserver.HStore(327): Store=d6bc8d8e61dabca25846dd8d50e1ea8f/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T15:21:45,695 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithTargetName af7631d267d518d88b1ac02b61113820 2024-12-02T15:21:45,696 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(898): Instantiated testtb-testExportWithTargetName,1,1733152905287.af7631d267d518d88b1ac02b61113820.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T15:21:45,696 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(7794): checking encryption for af7631d267d518d88b1ac02b61113820 2024-12-02T15:21:45,696 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(1038): replaying wal for d6bc8d8e61dabca25846dd8d50e1ea8f 2024-12-02T15:21:45,696 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(7797): checking classloading for af7631d267d518d88b1ac02b61113820 2024-12-02T15:21:45,697 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportWithTargetName/d6bc8d8e61dabca25846dd8d50e1ea8f 2024-12-02T15:21:45,697 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportWithTargetName/d6bc8d8e61dabca25846dd8d50e1ea8f 2024-12-02T15:21:45,697 INFO [StoreOpener-af7631d267d518d88b1ac02b61113820-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region af7631d267d518d88b1ac02b61113820 2024-12-02T15:21:45,698 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(1048): stopping wal replay for d6bc8d8e61dabca25846dd8d50e1ea8f 2024-12-02T15:21:45,698 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(1060): Cleaning up temporary data for d6bc8d8e61dabca25846dd8d50e1ea8f 2024-12-02T15:21:45,699 INFO [StoreOpener-af7631d267d518d88b1ac02b61113820-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window 
org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region af7631d267d518d88b1ac02b61113820 columnFamilyName cf 2024-12-02T15:21:45,699 DEBUG [StoreOpener-af7631d267d518d88b1ac02b61113820-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T15:21:45,699 INFO [StoreOpener-af7631d267d518d88b1ac02b61113820-1 {}] regionserver.HStore(327): Store=af7631d267d518d88b1ac02b61113820/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T15:21:45,700 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(1038): replaying wal for af7631d267d518d88b1ac02b61113820 2024-12-02T15:21:45,700 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(1093): writing seq id for d6bc8d8e61dabca25846dd8d50e1ea8f 2024-12-02T15:21:45,700 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportWithTargetName/af7631d267d518d88b1ac02b61113820 2024-12-02T15:21:45,701 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportWithTargetName/af7631d267d518d88b1ac02b61113820 2024-12-02T15:21:45,701 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(1048): stopping wal replay for af7631d267d518d88b1ac02b61113820 2024-12-02T15:21:45,701 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(1060): Cleaning up temporary data for af7631d267d518d88b1ac02b61113820 2024-12-02T15:21:45,703 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(1093): writing seq id for af7631d267d518d88b1ac02b61113820 2024-12-02T15:21:45,703 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportWithTargetName/d6bc8d8e61dabca25846dd8d50e1ea8f/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-02T15:21:45,704 INFO [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(1114): Opened d6bc8d8e61dabca25846dd8d50e1ea8f; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=65457384, jitterRate=-0.024608969688415527}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-02T15:21:45,704 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(1122): Running coprocessor post-open hooks for d6bc8d8e61dabca25846dd8d50e1ea8f 2024-12-02T15:21:45,704 DEBUG 
[RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(1006): Region open journal for d6bc8d8e61dabca25846dd8d50e1ea8f: Running coprocessor pre-open hook at 1733152905691Writing region info on filesystem at 1733152905691Initializing all the Stores at 1733152905692 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733152905693 (+1 ms)Cleaning up temporary data from old regions at 1733152905698 (+5 ms)Running coprocessor post-open hooks at 1733152905704 (+6 ms)Region opened successfully at 1733152905704 2024-12-02T15:21:45,705 INFO [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportWithTargetName,,1733152905287.d6bc8d8e61dabca25846dd8d50e1ea8f., pid=49, masterSystemTime=1733152905686 2024-12-02T15:21:45,706 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportWithTargetName/af7631d267d518d88b1ac02b61113820/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-02T15:21:45,706 INFO [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(1114): Opened af7631d267d518d88b1ac02b61113820; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=62529618, jitterRate=-0.0682360827922821}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-02T15:21:45,706 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(1122): Running coprocessor post-open hooks for af7631d267d518d88b1ac02b61113820 2024-12-02T15:21:45,707 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(1006): Region open journal for af7631d267d518d88b1ac02b61113820: Running coprocessor pre-open hook at 1733152905696Writing region info on filesystem at 1733152905696Initializing all the Stores at 1733152905697 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733152905697Cleaning up temporary data from old regions at 1733152905701 (+4 ms)Running coprocessor post-open hooks at 1733152905706 (+5 ms)Region opened successfully at 1733152905707 (+1 ms) 2024-12-02T15:21:45,707 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportWithTargetName,,1733152905287.d6bc8d8e61dabca25846dd8d50e1ea8f. 
2024-12-02T15:21:45,707 INFO [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] handler.AssignRegionHandler(153): Opened testtb-testExportWithTargetName,,1733152905287.d6bc8d8e61dabca25846dd8d50e1ea8f. 2024-12-02T15:21:45,708 INFO [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportWithTargetName,1,1733152905287.af7631d267d518d88b1ac02b61113820., pid=48, masterSystemTime=1733152905686 2024-12-02T15:21:45,708 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=46 updating hbase:meta row=d6bc8d8e61dabca25846dd8d50e1ea8f, regionState=OPEN, openSeqNum=2, regionLocation=be0458f36726,43025,1733152773677 2024-12-02T15:21:45,710 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportWithTargetName,1,1733152905287.af7631d267d518d88b1ac02b61113820. 2024-12-02T15:21:45,710 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=49, ppid=46, state=RUNNABLE, hasLock=false; OpenRegionProcedure d6bc8d8e61dabca25846dd8d50e1ea8f, server=be0458f36726,43025,1733152773677 because future has completed 2024-12-02T15:21:45,710 INFO [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] handler.AssignRegionHandler(153): Opened testtb-testExportWithTargetName,1,1733152905287.af7631d267d518d88b1ac02b61113820. 2024-12-02T15:21:45,710 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=47 updating hbase:meta row=af7631d267d518d88b1ac02b61113820, regionState=OPEN, openSeqNum=2, regionLocation=be0458f36726,45405,1733152773787 2024-12-02T15:21:45,713 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=49, resume processing ppid=46 2024-12-02T15:21:45,713 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=49, ppid=46, state=SUCCESS, hasLock=false; OpenRegionProcedure d6bc8d8e61dabca25846dd8d50e1ea8f, server=be0458f36726,43025,1733152773677 in 175 msec 2024-12-02T15:21:45,713 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=48, ppid=47, state=RUNNABLE, hasLock=false; OpenRegionProcedure af7631d267d518d88b1ac02b61113820, server=be0458f36726,45405,1733152773787 because future has completed 2024-12-02T15:21:45,715 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=46, ppid=45, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=d6bc8d8e61dabca25846dd8d50e1ea8f, ASSIGN in 339 msec 2024-12-02T15:21:45,716 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=48, resume processing ppid=47 2024-12-02T15:21:45,717 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=48, ppid=47, state=SUCCESS, hasLock=false; OpenRegionProcedure af7631d267d518d88b1ac02b61113820, server=be0458f36726,45405,1733152773787 in 185 msec 2024-12-02T15:21:45,718 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=47, resume processing ppid=45 2024-12-02T15:21:45,718 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=47, ppid=45, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=af7631d267d518d88b1ac02b61113820, ASSIGN in 342 msec 
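By this point both ASSIGN TransitRegionStateProcedures (pid=46 and pid=47) have finished and the two regions are OPEN on be0458f36726,43025 and be0458f36726,45405. A small hedged sketch of how a client could inspect those placements; the Connection is assumed to exist as in the earlier sketch.

import java.io.IOException;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.RegionLocator;

public final class ShowRegionLocations {
  // Prints "<encoded region name> -> <hosting server>" for every region of the table.
  static void printLocations(Connection conn, TableName tn) throws IOException {
    try (RegionLocator locator = conn.getRegionLocator(tn)) {
      for (HRegionLocation loc : locator.getAllRegionLocations()) {
        System.out.println(loc.getRegion().getEncodedName() + " -> " + loc.getServerName());
      }
    }
  }
}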
2024-12-02T15:21:45,719 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=45, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportWithTargetName execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-02T15:21:45,719 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithTargetName","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733152905719"}]},"ts":"1733152905719"} 2024-12-02T15:21:45,721 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithTargetName, state=ENABLED in hbase:meta 2024-12-02T15:21:45,722 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=45, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportWithTargetName execute state=CREATE_TABLE_POST_OPERATION 2024-12-02T15:21:45,723 DEBUG [PEWorker-3 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportWithTargetName jenkins: RWXCA 2024-12-02T15:21:45,727 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43025 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithTargetName], kv [jenkins: RWXCA] 2024-12-02T15:21:45,737 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35155-0x10197ce54a90003, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T15:21:45,737 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43025-0x10197ce54a90001, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T15:21:45,737 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34441-0x10197ce54a90000, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T15:21:45,737 DEBUG [pool-69-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45405-0x10197ce54a90002, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T15:21:45,746 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF\x0AE\x0A\x07jenkins\x12:\x08\x03"6\x0A*\x0A\x07default\x12\x1Ftesttb-testExportWithTargetName \x00 \x01 \x02 \x03 \x04 2024-12-02T15:21:45,747 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF\x0AE\x0A\x07jenkins\x12:\x08\x03"6\x0A*\x0A\x07default\x12\x1Ftesttb-testExportWithTargetName \x00 \x01 \x02 \x03 \x04 2024-12-02T15:21:45,747 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF\x0AE\x0A\x07jenkins\x12:\x08\x03"6\x0A*\x0A\x07default\x12\x1Ftesttb-testExportWithTargetName \x00 \x01 \x02 \x03 \x04 2024-12-02T15:21:45,747 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF\x0AE\x0A\x07jenkins\x12:\x08\x03"6\x0A*\x0A\x07default\x12\x1Ftesttb-testExportWithTargetName \x00 \x01 \x02 \x03 \x04 2024-12-02T15:21:45,749 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): 
Finished pid=45, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportWithTargetName in 459 msec 2024-12-02T15:21:45,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=45 2024-12-02T15:21:45,913 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportWithTargetName completed 2024-12-02T15:21:45,913 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table testtb-testExportWithTargetName get assigned. Timeout = 60000ms 2024-12-02T15:21:45,913 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-02T15:21:45,917 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table testtb-testExportWithTargetName assigned to meta. Checking AM states. 2024-12-02T15:21:45,918 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-02T15:21:45,918 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table testtb-testExportWithTargetName assigned. 2024-12-02T15:21:45,918 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithTargetName,, stopping at row=testtb-testExportWithTargetName ,, for max=2147483647 with caching=100 2024-12-02T15:21:45,921 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } 2024-12-02T15:21:45,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733152905921 (current time:1733152905921). 
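The snapshot request logged just above ({ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 }) is what a client-side Admin.snapshot call produces. A minimal sketch, again assuming an already-configured Connection rather than the test harness's own wiring:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.SnapshotType;

public final class TakeSnapshot {
  // Requests a FLUSH-type snapshot and blocks until the master reports it complete.
  static void snapshotTable(Connection conn) throws Exception {
    try (Admin admin = conn.getAdmin()) {
      admin.snapshot("emptySnaptb0-testExportWithTargetName",
          TableName.valueOf("testtb-testExportWithTargetName"),
          SnapshotType.FLUSH);
    }
  }
}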
2024-12-02T15:21:45,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-02T15:21:45,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testExportWithTargetName VERSION not specified, setting to 2 2024-12-02T15:21:45,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-02T15:21:45,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6f292e3b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T15:21:45,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] client.ClusterIdFetcher(90): Going to request be0458f36726,34441,-1 for getting cluster id 2024-12-02T15:21:45,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-02T15:21:45,924 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '8e50677f-f1c7-447a-8806-ffbc6b162076' 2024-12-02T15:21:45,924 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-02T15:21:45,924 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "8e50677f-f1c7-447a-8806-ffbc6b162076" 2024-12-02T15:21:45,924 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@b5e2eb5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T15:21:45,924 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [be0458f36726,34441,-1] 2024-12-02T15:21:45,924 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-02T15:21:45,925 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T15:21:45,925 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52722, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-02T15:21:45,926 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@30e0a318, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T15:21:45,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-02T15:21:45,927 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 
{}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=be0458f36726,45405,1733152773787, seqNum=-1] 2024-12-02T15:21:45,928 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T15:21:45,929 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43446, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T15:21:45,930 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441. 2024-12-02T15:21:45,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-02T15:21:45,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T15:21:45,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T15:21:45,931 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-12-02T15:21:45,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2279365e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T15:21:45,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] client.ClusterIdFetcher(90): Going to request be0458f36726,34441,-1 for getting cluster id 2024-12-02T15:21:45,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-02T15:21:45,932 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '8e50677f-f1c7-447a-8806-ffbc6b162076' 2024-12-02T15:21:45,932 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-02T15:21:45,932 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "8e50677f-f1c7-447a-8806-ffbc6b162076" 2024-12-02T15:21:45,933 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@75a2c10b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T15:21:45,933 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [be0458f36726,34441,-1] 2024-12-02T15:21:45,933 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-02T15:21:45,933 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T15:21:45,934 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52746, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-02T15:21:45,935 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@792de6c1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T15:21:45,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-02T15:21:45,937 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=be0458f36726,45405,1733152773787, seqNum=-1] 2024-12-02T15:21:45,937 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T15:21:45,938 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43448, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
2024-12-02T15:21:45,940 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportWithTargetName', locateType=CURRENT is [region=hbase:acl,,1733152776657.82c95700d9ff2efea2507bd84223c4b0., hostname=be0458f36726,43025,1733152773677, seqNum=2] 2024-12-02T15:21:45,940 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T15:21:45,941 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60312, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T15:21:45,942 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441. 2024-12-02T15:21:45,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at 
org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-02T15:21:45,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T15:21:45,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T15:21:45,943 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-02T15:21:45,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithTargetName], kv [jenkins: RWXCA] 2024-12-02T15:21:45,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-12-02T15:21:45,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] procedure2.ProcedureExecutor(1139): Stored pid=50, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=50, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } 2024-12-02T15:21:45,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 }, snapshot procedure id = 50 2024-12-02T15:21:45,946 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=50, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=50, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-02T15:21:45,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=50 2024-12-02T15:21:45,947 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=50, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=50, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-02T15:21:45,950 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=50, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=50, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-02T15:21:45,963 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073741910_1086 (size=167) 2024-12-02T15:21:45,963 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073741910_1086 (size=167) 2024-12-02T15:21:45,963 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073741910_1086 (size=167) 2024-12-02T15:21:45,965 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=50, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=50, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName 
type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-02T15:21:45,965 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=51, ppid=50, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure d6bc8d8e61dabca25846dd8d50e1ea8f}, {pid=52, ppid=50, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure af7631d267d518d88b1ac02b61113820}] 2024-12-02T15:21:45,966 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=51, ppid=50, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure d6bc8d8e61dabca25846dd8d50e1ea8f 2024-12-02T15:21:45,966 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=52, ppid=50, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure af7631d267d518d88b1ac02b61113820 2024-12-02T15:21:46,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=50 2024-12-02T15:21:46,118 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45405 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=52 2024-12-02T15:21:46,118 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43025 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=51 2024-12-02T15:21:46,119 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=51}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithTargetName,,1733152905287.d6bc8d8e61dabca25846dd8d50e1ea8f. 2024-12-02T15:21:46,120 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=51}] regionserver.HRegion(2603): Flush status journal for d6bc8d8e61dabca25846dd8d50e1ea8f: 2024-12-02T15:21:46,120 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=51}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithTargetName,,1733152905287.d6bc8d8e61dabca25846dd8d50e1ea8f. for emptySnaptb0-testExportWithTargetName completed. 2024-12-02T15:21:46,120 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=52}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithTargetName,1,1733152905287.af7631d267d518d88b1ac02b61113820. 2024-12-02T15:21:46,120 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=51}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithTargetName,,1733152905287.d6bc8d8e61dabca25846dd8d50e1ea8f.' 
region-info for snapshot=emptySnaptb0-testExportWithTargetName 2024-12-02T15:21:46,120 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=51}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-02T15:21:46,120 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=51}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-02T15:21:46,120 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=52}] regionserver.HRegion(2603): Flush status journal for af7631d267d518d88b1ac02b61113820: 2024-12-02T15:21:46,121 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=52}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithTargetName,1,1733152905287.af7631d267d518d88b1ac02b61113820. for emptySnaptb0-testExportWithTargetName completed. 2024-12-02T15:21:46,121 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=52}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithTargetName,1,1733152905287.af7631d267d518d88b1ac02b61113820.' region-info for snapshot=emptySnaptb0-testExportWithTargetName 2024-12-02T15:21:46,121 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=52}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-02T15:21:46,121 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=52}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-02T15:21:46,132 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073741911_1087 (size=70) 2024-12-02T15:21:46,133 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073741911_1087 (size=70) 2024-12-02T15:21:46,133 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073741911_1087 (size=70) 2024-12-02T15:21:46,150 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=51}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithTargetName,,1733152905287.d6bc8d8e61dabca25846dd8d50e1ea8f. 
2024-12-02T15:21:46,150 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=51}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=51 2024-12-02T15:21:46,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.HMaster(4169): Remote procedure done, pid=51 2024-12-02T15:21:46,151 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportWithTargetName on region d6bc8d8e61dabca25846dd8d50e1ea8f 2024-12-02T15:21:46,151 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=51, ppid=50, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure d6bc8d8e61dabca25846dd8d50e1ea8f 2024-12-02T15:21:46,154 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=51, ppid=50, state=SUCCESS, hasLock=false; SnapshotRegionProcedure d6bc8d8e61dabca25846dd8d50e1ea8f in 187 msec 2024-12-02T15:21:46,168 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073741912_1088 (size=70) 2024-12-02T15:21:46,168 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073741912_1088 (size=70) 2024-12-02T15:21:46,170 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073741912_1088 (size=70) 2024-12-02T15:21:46,170 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=52}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithTargetName,1,1733152905287.af7631d267d518d88b1ac02b61113820. 
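
The entries above trace the master-side SnapshotProcedure for the FLUSH-type snapshot emptySnaptb0-testExportWithTargetName through SNAPSHOT_PREPARE, SNAPSHOT_SNAPSHOT_ONLINE_REGIONS and the per-region SnapshotRegionProcedures (pid=51, pid=52). For orientation, a minimal client-side sketch of how such a snapshot is requested, assuming the standard HBase Java client; the table and snapshot names are taken from the log, everything else is illustrative:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotType;

public class TakeFlushSnapshot {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Request a FLUSH-type snapshot; the master then drives the
      // SnapshotProcedure state machine (SNAPSHOT_PREPARE -> ... ->
      // SNAPSHOT_COMPLETE_SNAPSHOT) recorded in the log above.
      admin.snapshot("emptySnaptb0-testExportWithTargetName",
          TableName.valueOf("testtb-testExportWithTargetName"),
          SnapshotType.FLUSH);
    }
  }
}

The synchronous snapshot call returns only after the master procedure finishes, which is consistent with the repeated "Checking to see if procedure is done pid=50" entries in the log.
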
2024-12-02T15:21:46,170 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=52 2024-12-02T15:21:46,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.HMaster(4169): Remote procedure done, pid=52 2024-12-02T15:21:46,171 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportWithTargetName on region af7631d267d518d88b1ac02b61113820 2024-12-02T15:21:46,171 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=52, ppid=50, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure af7631d267d518d88b1ac02b61113820 2024-12-02T15:21:46,176 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=52, resume processing ppid=50 2024-12-02T15:21:46,176 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=50, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=50, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-02T15:21:46,176 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=52, ppid=50, state=SUCCESS, hasLock=false; SnapshotRegionProcedure af7631d267d518d88b1ac02b61113820 in 207 msec 2024-12-02T15:21:46,177 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=50, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=50, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-02T15:21:46,178 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=50, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=50, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-02T15:21:46,179 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportWithTargetName 2024-12-02T15:21:46,183 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/.hbase-snapshot/.tmp/emptySnaptb0-testExportWithTargetName 2024-12-02T15:21:46,195 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073741913_1089 (size=549) 2024-12-02T15:21:46,195 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073741913_1089 (size=549) 2024-12-02T15:21:46,195 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073741913_1089 (size=549) 2024-12-02T15:21:46,199 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=50, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=50, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-02T15:21:46,212 
INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=50, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=50, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-02T15:21:46,213 DEBUG [PEWorker-4 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/.hbase-snapshot/.tmp/emptySnaptb0-testExportWithTargetName to hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/.hbase-snapshot/emptySnaptb0-testExportWithTargetName 2024-12-02T15:21:46,214 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=50, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=50, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-02T15:21:46,214 DEBUG [PEWorker-4 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 }, snapshot procedure id = 50 2024-12-02T15:21:46,216 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=50, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=50, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } in 270 msec 2024-12-02T15:21:46,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=50 2024-12-02T15:21:46,264 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportWithTargetName completed 2024-12-02T15:21:46,271 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportWithTargetName', row='05d3e7ed029c18cda3a2e349928f7c905', locateType=CURRENT is [region=testtb-testExportWithTargetName,,1733152905287.d6bc8d8e61dabca25846dd8d50e1ea8f., hostname=be0458f36726,43025,1733152773677, seqNum=2] 2024-12-02T15:21:46,273 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportWithTargetName', row='1134555c59e7cf792547884aff2293c99', locateType=CURRENT is [region=testtb-testExportWithTargetName,1,1733152905287.af7631d267d518d88b1ac02b61113820., hostname=be0458f36726,45405,1733152773787, seqNum=2] 2024-12-02T15:21:46,274 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportWithTargetName', row='2994f0659c8180ed377466ed905f8c694', locateType=CURRENT is [region=testtb-testExportWithTargetName,1,1733152905287.af7631d267d518d88b1ac02b61113820., hostname=be0458f36726,45405,1733152773787, seqNum=2] 2024-12-02T15:21:46,276 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportWithTargetName', row='33142eeb9cf09f691b7c9b4989d8a022d', locateType=CURRENT is [region=testtb-testExportWithTargetName,1,1733152905287.af7631d267d518d88b1ac02b61113820., hostname=be0458f36726,45405,1733152773787, seqNum=2] 2024-12-02T15:21:46,277 DEBUG 
[RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportWithTargetName', row='5223ad5177528482ac82c680f921694a0', locateType=CURRENT is [region=testtb-testExportWithTargetName,1,1733152905287.af7631d267d518d88b1ac02b61113820., hostname=be0458f36726,45405,1733152773787, seqNum=2] 2024-12-02T15:21:46,277 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T15:21:46,279 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportWithTargetName', row='4206ff7fd895ee006fee660ea4cd05410', locateType=CURRENT is [region=testtb-testExportWithTargetName,1,1733152905287.af7631d267d518d88b1ac02b61113820., hostname=be0458f36726,45405,1733152773787, seqNum=2] 2024-12-02T15:21:46,279 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60318, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T15:21:46,282 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45405 {}] regionserver.HRegion(8528): writing data to region testtb-testExportWithTargetName,1,1733152905287.af7631d267d518d88b1ac02b61113820. with WAL disabled. Data may be lost in the event of a crash. 2024-12-02T15:21:46,283 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43025 {}] regionserver.HRegion(8528): writing data to region testtb-testExportWithTargetName,,1733152905287.d6bc8d8e61dabca25846dd8d50e1ea8f. with WAL disabled. Data may be lost in the event of a crash. 2024-12-02T15:21:46,285 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithTargetName,, stopping at row=testtb-testExportWithTargetName ,, for max=2147483647 with caching=100 2024-12-02T15:21:46,288 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportWithTargetName 2024-12-02T15:21:46,288 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportWithTargetName,,1733152905287.d6bc8d8e61dabca25846dd8d50e1ea8f. 
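
The region server warnings above ("writing data to region ... with WAL disabled. Data may be lost in the event of a crash.") correspond to puts issued with durability SKIP_WAL, which the test uses to load rows quickly before taking the next snapshot. A small sketch of such a write, assuming the standard HBase Java client; the row key and column are taken from a later log entry, the cell value is a placeholder:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class SkipWalPut {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("testtb-testExportWithTargetName"))) {
      Put put = new Put(Bytes.toBytes("0004db7cf709a5846609dbbfd04cc506"));
      put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value"));
      // Skipping the WAL is what triggers the "Data may be lost in the
      // event of a crash" warning logged by HRegion above.
      put.setDurability(Durability.SKIP_WAL);
      table.put(put);
    }
  }
}
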
2024-12-02T15:21:46,288 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-02T15:21:46,290 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithTargetName,, stopping at row=testtb-testExportWithTargetName ,, for max=2147483647 with caching=100 2024-12-02T15:21:46,297 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithTargetName,, stopping at row=testtb-testExportWithTargetName ,, for max=2147483647 with caching=100 2024-12-02T15:21:46,306 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithTargetName,, stopping at row=testtb-testExportWithTargetName ,, for max=2147483647 with caching=100 2024-12-02T15:21:46,309 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } 2024-12-02T15:21:46,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733152906309 (current time:1733152906309). 2024-12-02T15:21:46,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-02T15:21:46,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportWithTargetName VERSION not specified, setting to 2 2024-12-02T15:21:46,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-02T15:21:46,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4da0dcd0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T15:21:46,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] client.ClusterIdFetcher(90): Going to request be0458f36726,34441,-1 for getting cluster id 2024-12-02T15:21:46,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-02T15:21:46,311 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '8e50677f-f1c7-447a-8806-ffbc6b162076' 2024-12-02T15:21:46,311 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-02T15:21:46,312 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "8e50677f-f1c7-447a-8806-ffbc6b162076" 2024-12-02T15:21:46,312 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@45ef28f6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind 
address=null 2024-12-02T15:21:46,312 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [be0458f36726,34441,-1] 2024-12-02T15:21:46,312 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-02T15:21:46,312 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T15:21:46,314 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52772, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-02T15:21:46,314 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@473cb3ef, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T15:21:46,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-02T15:21:46,316 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=be0458f36726,45405,1733152773787, seqNum=-1] 2024-12-02T15:21:46,316 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T15:21:46,317 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43460, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T15:21:46,318 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441. 
2024-12-02T15:21:46,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-02T15:21:46,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T15:21:46,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T15:21:46,319 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-02T15:21:46,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@11146be2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T15:21:46,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] client.ClusterIdFetcher(90): Going to request be0458f36726,34441,-1 for getting cluster id 2024-12-02T15:21:46,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-02T15:21:46,320 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '8e50677f-f1c7-447a-8806-ffbc6b162076' 2024-12-02T15:21:46,321 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-02T15:21:46,321 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "8e50677f-f1c7-447a-8806-ffbc6b162076" 2024-12-02T15:21:46,321 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4be0ea55, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T15:21:46,321 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [be0458f36726,34441,-1] 2024-12-02T15:21:46,321 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-02T15:21:46,321 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T15:21:46,322 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52804, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-02T15:21:46,323 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6a833492, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T15:21:46,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-02T15:21:46,325 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=be0458f36726,45405,1733152773787, seqNum=-1] 2024-12-02T15:21:46,326 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T15:21:46,326 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43462, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T15:21:46,328 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportWithTargetName', locateType=CURRENT is [region=hbase:acl,,1733152776657.82c95700d9ff2efea2507bd84223c4b0., hostname=be0458f36726,43025,1733152773677, seqNum=2] 2024-12-02T15:21:46,329 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T15:21:46,329 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60332, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T15:21:46,331 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441. 
2024-12-02T15:21:46,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-02T15:21:46,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T15:21:46,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T15:21:46,331 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-02T15:21:46,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithTargetName], kv [jenkins: RWXCA] 2024-12-02T15:21:46,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
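
Before storing the snapshot, writeAclToSnapshotDescription (visible in the call stack above) reads the table's ACL entry back from hbase:acl, here "[jenkins: RWXCA]", so that the permissions travel with the snapshot description. A hedged sketch of how such an entry is typically created, assuming the AccessController coprocessor is enabled; the user and table names come from the log, the call itself is illustrative:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.security.access.AccessControlClient;
import org.apache.hadoop.hbase.security.access.Permission;

public class GrantTablePermissions {
  public static void main(String[] args) throws Throwable {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf)) {
      // Grant R/W/X/C/A on the whole table (family and qualifier null);
      // PermissionStorage later reads this entry back as "[jenkins: RWXCA]".
      AccessControlClient.grant(conn,
          TableName.valueOf("testtb-testExportWithTargetName"), "jenkins",
          null, null,
          Permission.Action.READ, Permission.Action.WRITE, Permission.Action.EXEC,
          Permission.Action.CREATE, Permission.Action.ADMIN);
    }
  }
}
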
2024-12-02T15:21:46,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] procedure2.ProcedureExecutor(1139): Stored pid=53, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=53, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } 2024-12-02T15:21:46,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 }, snapshot procedure id = 53 2024-12-02T15:21:46,334 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=53, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=53, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-02T15:21:46,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=53 2024-12-02T15:21:46,335 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=53, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=53, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-02T15:21:46,339 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=53, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=53, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-02T15:21:46,349 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073741914_1090 (size=162) 2024-12-02T15:21:46,349 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073741914_1090 (size=162) 2024-12-02T15:21:46,350 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073741914_1090 (size=162) 2024-12-02T15:21:46,355 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=53, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=53, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-02T15:21:46,355 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=54, ppid=53, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure d6bc8d8e61dabca25846dd8d50e1ea8f}, {pid=55, ppid=53, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure af7631d267d518d88b1ac02b61113820}] 2024-12-02T15:21:46,357 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=54, ppid=53, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure d6bc8d8e61dabca25846dd8d50e1ea8f 2024-12-02T15:21:46,357 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=55, ppid=53, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure af7631d267d518d88b1ac02b61113820 2024-12-02T15:21:46,443 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=53 2024-12-02T15:21:46,509 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43025 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=54 2024-12-02T15:21:46,509 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithTargetName,,1733152905287.d6bc8d8e61dabca25846dd8d50e1ea8f. 2024-12-02T15:21:46,509 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] regionserver.HRegion(2902): Flushing d6bc8d8e61dabca25846dd8d50e1ea8f 1/1 column families, dataSize=467 B heapSize=1.23 KB 2024-12-02T15:21:46,510 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45405 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=55 2024-12-02T15:21:46,510 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithTargetName,1,1733152905287.af7631d267d518d88b1ac02b61113820. 2024-12-02T15:21:46,511 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] regionserver.HRegion(2902): Flushing af7631d267d518d88b1ac02b61113820 1/1 column families, dataSize=2.80 KB heapSize=6.30 KB 2024-12-02T15:21:46,537 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportWithTargetName/d6bc8d8e61dabca25846dd8d50e1ea8f/.tmp/cf/bbc593ce93cf41d6bc5d6fc019bb02bb is 71, key is 0004db7cf709a5846609dbbfd04cc506/cf:q/1733152906283/Put/seqid=0 2024-12-02T15:21:46,542 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportWithTargetName/af7631d267d518d88b1ac02b61113820/.tmp/cf/af683d2283ad46818e1ed9e08001febc is 71, key is 23263f65d1ce3f15fcfcdd931f65b31a/cf:q/1733152906282/Put/seqid=0 2024-12-02T15:21:46,560 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073741915_1091 (size=5566) 2024-12-02T15:21:46,561 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073741915_1091 (size=5566) 2024-12-02T15:21:46,561 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073741915_1091 (size=5566) 2024-12-02T15:21:46,562 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=467 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportWithTargetName/d6bc8d8e61dabca25846dd8d50e1ea8f/.tmp/cf/bbc593ce93cf41d6bc5d6fc019bb02bb 
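
Unlike the earlier empty snapshot, snaptb0-testExportWithTargetName is taken after data has been written, so each SnapshotRegionCallable first flushes its region's memstore to an HFile (467 B and 2.80 KB in the DefaultStoreFlusher entries above) before adding file references to the manifest. The same per-region flush can also be forced manually; a minimal sketch, assuming the standard Admin API:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTable {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Write every region's memstore out as HFiles, the same per-region
      // flush a FLUSH-type snapshot performs implicitly before it records
      // the store file references in the snapshot manifest.
      admin.flush(TableName.valueOf("testtb-testExportWithTargetName"));
    }
  }
}
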
2024-12-02T15:21:46,574 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportWithTargetName/d6bc8d8e61dabca25846dd8d50e1ea8f/.tmp/cf/bbc593ce93cf41d6bc5d6fc019bb02bb as hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportWithTargetName/d6bc8d8e61dabca25846dd8d50e1ea8f/cf/bbc593ce93cf41d6bc5d6fc019bb02bb 2024-12-02T15:21:46,576 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073741916_1092 (size=8054) 2024-12-02T15:21:46,577 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073741916_1092 (size=8054) 2024-12-02T15:21:46,578 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073741916_1092 (size=8054) 2024-12-02T15:21:46,578 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.80 KB at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportWithTargetName/af7631d267d518d88b1ac02b61113820/.tmp/cf/af683d2283ad46818e1ed9e08001febc 2024-12-02T15:21:46,585 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportWithTargetName/d6bc8d8e61dabca25846dd8d50e1ea8f/cf/bbc593ce93cf41d6bc5d6fc019bb02bb, entries=7, sequenceid=6, filesize=5.4 K 2024-12-02T15:21:46,587 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] regionserver.HRegion(3140): Finished flush of dataSize ~467 B/467, heapSize ~1.22 KB/1248, currentSize=0 B/0 for d6bc8d8e61dabca25846dd8d50e1ea8f in 78ms, sequenceid=6, compaction requested=false 2024-12-02T15:21:46,587 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportWithTargetName' 2024-12-02T15:21:46,588 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportWithTargetName/af7631d267d518d88b1ac02b61113820/.tmp/cf/af683d2283ad46818e1ed9e08001febc as hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportWithTargetName/af7631d267d518d88b1ac02b61113820/cf/af683d2283ad46818e1ed9e08001febc 2024-12-02T15:21:46,588 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] regionserver.HRegion(2603): Flush status journal for d6bc8d8e61dabca25846dd8d50e1ea8f: 2024-12-02T15:21:46,588 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] regionserver.SnapshotRegionCallable(75): Snapshotting region 
testtb-testExportWithTargetName,,1733152905287.d6bc8d8e61dabca25846dd8d50e1ea8f. for snaptb0-testExportWithTargetName completed. 2024-12-02T15:21:46,589 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithTargetName,,1733152905287.d6bc8d8e61dabca25846dd8d50e1ea8f.' region-info for snapshot=snaptb0-testExportWithTargetName 2024-12-02T15:21:46,589 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-02T15:21:46,589 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportWithTargetName/d6bc8d8e61dabca25846dd8d50e1ea8f/cf/bbc593ce93cf41d6bc5d6fc019bb02bb] hfiles 2024-12-02T15:21:46,589 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportWithTargetName/d6bc8d8e61dabca25846dd8d50e1ea8f/cf/bbc593ce93cf41d6bc5d6fc019bb02bb for snapshot=snaptb0-testExportWithTargetName 2024-12-02T15:21:46,599 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportWithTargetName/af7631d267d518d88b1ac02b61113820/cf/af683d2283ad46818e1ed9e08001febc, entries=43, sequenceid=6, filesize=7.9 K 2024-12-02T15:21:46,600 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] regionserver.HRegion(3140): Finished flush of dataSize ~2.80 KB/2869, heapSize ~6.28 KB/6432, currentSize=0 B/0 for af7631d267d518d88b1ac02b61113820 in 90ms, sequenceid=6, compaction requested=false 2024-12-02T15:21:46,600 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] regionserver.HRegion(2603): Flush status journal for af7631d267d518d88b1ac02b61113820: 2024-12-02T15:21:46,600 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithTargetName,1,1733152905287.af7631d267d518d88b1ac02b61113820. for snaptb0-testExportWithTargetName completed. 2024-12-02T15:21:46,600 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithTargetName,1,1733152905287.af7631d267d518d88b1ac02b61113820.' 
region-info for snapshot=snaptb0-testExportWithTargetName 2024-12-02T15:21:46,601 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-02T15:21:46,601 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportWithTargetName/af7631d267d518d88b1ac02b61113820/cf/af683d2283ad46818e1ed9e08001febc] hfiles 2024-12-02T15:21:46,601 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportWithTargetName/af7631d267d518d88b1ac02b61113820/cf/af683d2283ad46818e1ed9e08001febc for snapshot=snaptb0-testExportWithTargetName 2024-12-02T15:21:46,602 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073741917_1093 (size=109) 2024-12-02T15:21:46,602 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073741917_1093 (size=109) 2024-12-02T15:21:46,603 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073741917_1093 (size=109) 2024-12-02T15:21:46,604 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithTargetName,,1733152905287.d6bc8d8e61dabca25846dd8d50e1ea8f. 
2024-12-02T15:21:46,604 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=54 2024-12-02T15:21:46,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.HMaster(4169): Remote procedure done, pid=54 2024-12-02T15:21:46,605 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportWithTargetName on region d6bc8d8e61dabca25846dd8d50e1ea8f 2024-12-02T15:21:46,605 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=54, ppid=53, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure d6bc8d8e61dabca25846dd8d50e1ea8f 2024-12-02T15:21:46,608 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=54, ppid=53, state=SUCCESS, hasLock=false; SnapshotRegionProcedure d6bc8d8e61dabca25846dd8d50e1ea8f in 251 msec 2024-12-02T15:21:46,611 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073741918_1094 (size=109) 2024-12-02T15:21:46,612 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073741918_1094 (size=109) 2024-12-02T15:21:46,612 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073741918_1094 (size=109) 2024-12-02T15:21:46,614 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithTargetName,1,1733152905287.af7631d267d518d88b1ac02b61113820. 
2024-12-02T15:21:46,614 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=55 2024-12-02T15:21:46,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.HMaster(4169): Remote procedure done, pid=55 2024-12-02T15:21:46,615 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportWithTargetName on region af7631d267d518d88b1ac02b61113820 2024-12-02T15:21:46,615 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=55, ppid=53, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure af7631d267d518d88b1ac02b61113820 2024-12-02T15:21:46,617 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=55, resume processing ppid=53 2024-12-02T15:21:46,617 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=55, ppid=53, state=SUCCESS, hasLock=false; SnapshotRegionProcedure af7631d267d518d88b1ac02b61113820 in 261 msec 2024-12-02T15:21:46,617 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=53, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=53, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-02T15:21:46,618 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=53, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=53, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-02T15:21:46,618 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=53, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=53, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-02T15:21:46,618 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportWithTargetName 2024-12-02T15:21:46,619 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/.hbase-snapshot/.tmp/snaptb0-testExportWithTargetName 2024-12-02T15:21:46,631 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073741919_1095 (size=627) 2024-12-02T15:21:46,631 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073741919_1095 (size=627) 2024-12-02T15:21:46,631 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073741919_1095 (size=627) 2024-12-02T15:21:46,634 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=53, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=53, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-02T15:21:46,640 INFO [PEWorker-5 {}] 
procedure.SnapshotProcedure(134): pid=53, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=53, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-02T15:21:46,640 DEBUG [PEWorker-5 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/.hbase-snapshot/.tmp/snaptb0-testExportWithTargetName to hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/.hbase-snapshot/snaptb0-testExportWithTargetName 2024-12-02T15:21:46,642 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=53, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=53, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-02T15:21:46,642 DEBUG [PEWorker-5 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 }, snapshot procedure id = 53 2024-12-02T15:21:46,643 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=53, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=53, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } in 310 msec 2024-12-02T15:21:46,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=53 2024-12-02T15:21:46,654 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportWithTargetName completed 2024-12-02T15:21:46,654 INFO [Time-limited test {}] snapshot.TestExportSnapshot(515): HDFS export destination path: hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/export-test/export-1733152906654 2024-12-02T15:21:46,654 INFO [Time-limited test {}] snapshot.TestExportSnapshot(542): tgtFsUri=hdfs://localhost:36329, tgtDir=hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/export-test/export-1733152906654, rawTgtDir=hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/export-test/export-1733152906654, srcFsUri=hdfs://localhost:36329, srcDir=hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657 2024-12-02T15:21:46,680 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1094): inputFs=hdfs://localhost:36329, inputRoot=hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657 2024-12-02T15:21:46,680 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1095): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-205930424_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/export-test/export-1733152906654, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/export-test/export-1733152906654/.hbase-snapshot/.tmp/testExportWithTargetName 2024-12-02T15:21:46,683 INFO [Time-limited test {}] snapshot.ExportSnapshot(1104): Verify the source snapshot's expiration status 
and integrity. 2024-12-02T15:21:46,693 INFO [Time-limited test {}] snapshot.ExportSnapshot(1162): Copy Snapshot Manifest from hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/.hbase-snapshot/snaptb0-testExportWithTargetName to hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/export-test/export-1733152906654/.hbase-snapshot/.tmp/testExportWithTargetName 2024-12-02T15:21:46,705 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073741920_1096 (size=162) 2024-12-02T15:21:46,705 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073741921_1097 (size=627) 2024-12-02T15:21:46,705 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073741920_1096 (size=162) 2024-12-02T15:21:46,705 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073741921_1097 (size=627) 2024-12-02T15:21:46,706 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073741921_1097 (size=627) 2024-12-02T15:21:46,706 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073741920_1096 (size=162) 2024-12-02T15:21:46,714 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073741922_1098 (size=154) 2024-12-02T15:21:46,715 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073741922_1098 (size=154) 2024-12-02T15:21:46,715 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073741922_1098 (size=154) 2024-12-02T15:21:46,720 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-common/target/hbase-common-3.0.0-beta-2-SNAPSHOT.jar 2024-12-02T15:21:46,720 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-protocol-shaded/target/hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar 2024-12-02T15:21:46,720 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-client/target/hbase-client-3.0.0-beta-2-SNAPSHOT.jar 2024-12-02T15:21:47,651 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/44e720fa-d04f-d823-6564-55fab94a4e60/hadoop-16538516580280427786.jar 2024-12-02T15:21:47,651 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-hadoop-compat/target/hbase-hadoop-compat-3.0.0-beta-2-SNAPSHOT.jar 2024-12-02T15:21:47,651 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class 
org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-hadoop-compat/target/hbase-hadoop-compat-3.0.0-beta-2-SNAPSHOT.jar 2024-12-02T15:21:47,706 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/44e720fa-d04f-d823-6564-55fab94a4e60/hadoop-14568992909790291433.jar 2024-12-02T15:21:47,707 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-metrics/target/hbase-metrics-3.0.0-beta-2-SNAPSHOT.jar 2024-12-02T15:21:47,707 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-metrics-api/target/hbase-metrics-api-3.0.0-beta-2-SNAPSHOT.jar 2024-12-02T15:21:47,708 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-replication/target/hbase-replication-3.0.0-beta-2-SNAPSHOT.jar 2024-12-02T15:21:47,708 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-http/target/hbase-http-3.0.0-beta-2-SNAPSHOT.jar 2024-12-02T15:21:47,708 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-procedure/target/hbase-procedure-3.0.0-beta-2-SNAPSHOT.jar 2024-12-02T15:21:47,708 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-zookeeper/target/hbase-zookeeper-3.0.0-beta-2-SNAPSHOT.jar 2024-12-02T15:21:47,708 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-02T15:21:47,709 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-02T15:21:47,709 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-02T15:21:47,709 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-02T15:21:47,709 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-02T15:21:47,709 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-02T15:21:47,709 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-02T15:21:47,710 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-02T15:21:47,710 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-02T15:21:47,710 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-02T15:21:47,710 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-02T15:21:47,711 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-02T15:21:47,711 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-02T15:21:47,711 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-02T15:21:47,711 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-02T15:21:47,711 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class 
org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-02T15:21:47,711 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-02T15:21:47,712 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-02T15:21:47,763 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073741923_1099 (size=24020) 2024-12-02T15:21:47,763 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073741923_1099 (size=24020) 2024-12-02T15:21:47,763 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073741923_1099 (size=24020) 2024-12-02T15:21:47,772 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073741924_1100 (size=77755) 2024-12-02T15:21:47,772 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073741924_1100 (size=77755) 2024-12-02T15:21:47,772 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073741924_1100 (size=77755) 2024-12-02T15:21:47,787 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073741925_1101 (size=131360) 2024-12-02T15:21:47,787 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073741925_1101 (size=131360) 2024-12-02T15:21:47,788 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073741925_1101 (size=131360) 2024-12-02T15:21:47,796 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073741926_1102 (size=111793) 2024-12-02T15:21:47,796 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073741926_1102 (size=111793) 2024-12-02T15:21:47,796 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073741926_1102 (size=111793) 2024-12-02T15:21:47,808 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073741927_1103 (size=1832290) 2024-12-02T15:21:47,808 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073741927_1103 (size=1832290) 2024-12-02T15:21:47,809 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073741927_1103 (size=1832290) 2024-12-02T15:21:47,848 INFO 
[Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073741928_1104 (size=8360005) 2024-12-02T15:21:47,848 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073741928_1104 (size=8360005) 2024-12-02T15:21:47,848 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073741928_1104 (size=8360005) 2024-12-02T15:21:47,877 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073741929_1105 (size=503880) 2024-12-02T15:21:47,877 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073741929_1105 (size=503880) 2024-12-02T15:21:47,877 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073741929_1105 (size=503880) 2024-12-02T15:21:47,894 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073741930_1106 (size=322274) 2024-12-02T15:21:47,894 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073741930_1106 (size=322274) 2024-12-02T15:21:47,895 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073741930_1106 (size=322274) 2024-12-02T15:21:47,902 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073741931_1107 (size=20406) 2024-12-02T15:21:47,902 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073741931_1107 (size=20406) 2024-12-02T15:21:47,903 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073741931_1107 (size=20406) 2024-12-02T15:21:47,911 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073741932_1108 (size=45609) 2024-12-02T15:21:47,911 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073741932_1108 (size=45609) 2024-12-02T15:21:47,911 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073741932_1108 (size=45609) 2024-12-02T15:21:47,919 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073741933_1109 (size=136454) 2024-12-02T15:21:47,919 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073741933_1109 (size=136454) 2024-12-02T15:21:47,919 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073741933_1109 (size=136454) 2024-12-02T15:21:47,929 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073741934_1110 (size=1597136) 2024-12-02T15:21:47,930 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073741934_1110 (size=1597136) 2024-12-02T15:21:47,930 
INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073741934_1110 (size=1597136) 2024-12-02T15:21:47,937 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073741935_1111 (size=30873) 2024-12-02T15:21:47,937 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073741935_1111 (size=30873) 2024-12-02T15:21:47,938 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073741935_1111 (size=30873) 2024-12-02T15:21:47,945 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073741936_1112 (size=29229) 2024-12-02T15:21:47,946 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073741936_1112 (size=29229) 2024-12-02T15:21:47,946 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073741936_1112 (size=29229) 2024-12-02T15:21:47,969 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073741937_1113 (size=903849) 2024-12-02T15:21:47,969 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073741937_1113 (size=903849) 2024-12-02T15:21:47,970 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073741937_1113 (size=903849) 2024-12-02T15:21:47,992 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073741938_1114 (size=443171) 2024-12-02T15:21:47,993 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073741938_1114 (size=443171) 2024-12-02T15:21:47,994 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073741938_1114 (size=443171) 2024-12-02T15:21:48,020 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073741939_1115 (size=6424745) 2024-12-02T15:21:48,020 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073741939_1115 (size=6424745) 2024-12-02T15:21:48,020 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073741939_1115 (size=6424745) 2024-12-02T15:21:48,058 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073741940_1116 (size=5175431) 2024-12-02T15:21:48,058 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073741940_1116 (size=5175431) 2024-12-02T15:21:48,059 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073741940_1116 (size=5175431) 2024-12-02T15:21:48,089 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073741941_1117 (size=232881) 
2024-12-02T15:21:48,090 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073741941_1117 (size=232881) 2024-12-02T15:21:48,090 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073741941_1117 (size=232881) 2024-12-02T15:21:48,109 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073741942_1118 (size=1323991) 2024-12-02T15:21:48,109 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073741942_1118 (size=1323991) 2024-12-02T15:21:48,109 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073741942_1118 (size=1323991) 2024-12-02T15:21:48,139 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073741943_1119 (size=4695811) 2024-12-02T15:21:48,139 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073741943_1119 (size=4695811) 2024-12-02T15:21:48,139 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073741943_1119 (size=4695811) 2024-12-02T15:21:48,155 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073741944_1120 (size=1877034) 2024-12-02T15:21:48,155 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073741944_1120 (size=1877034) 2024-12-02T15:21:48,156 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073741944_1120 (size=1877034) 2024-12-02T15:21:48,177 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073741945_1121 (size=217555) 2024-12-02T15:21:48,177 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073741945_1121 (size=217555) 2024-12-02T15:21:48,177 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073741945_1121 (size=217555) 2024-12-02T15:21:48,209 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073741946_1122 (size=4188619) 2024-12-02T15:21:48,210 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073741946_1122 (size=4188619) 2024-12-02T15:21:48,210 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073741946_1122 (size=4188619) 2024-12-02T15:21:48,240 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073741947_1123 (size=127628) 2024-12-02T15:21:48,240 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073741947_1123 (size=127628) 2024-12-02T15:21:48,241 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to 
blk_1073741947_1123 (size=127628) 2024-12-02T15:21:48,242 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 2024-12-02T15:21:48,253 INFO [Time-limited test {}] snapshot.ExportSnapshot(663): Loading Snapshot 'snaptb0-testExportWithTargetName' hfile list 2024-12-02T15:21:48,257 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=0 size=7.9 K 2024-12-02T15:21:48,257 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=1 size=5.4 K 2024-12-02T15:21:48,296 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073741948_1124 (size=445) 2024-12-02T15:21:48,296 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073741948_1124 (size=445) 2024-12-02T15:21:48,297 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073741948_1124 (size=445) 2024-12-02T15:21:48,320 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073741949_1125 (size=21) 2024-12-02T15:21:48,321 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073741949_1125 (size=21) 2024-12-02T15:21:48,321 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073741949_1125 (size=21) 2024-12-02T15:21:48,356 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073741950_1126 (size=304088) 2024-12-02T15:21:48,356 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073741950_1126 (size=304088) 2024-12-02T15:21:48,356 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073741950_1126 (size=304088) 2024-12-02T15:21:48,923 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-02T15:21:48,923 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-12-02T15:21:48,928 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733152780087_0001_000001 (auth:SIMPLE) from 127.0.0.1:45684 2024-12-02T15:21:48,932 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_126270/yarn-6841854118/MiniMRCluster_126270-localDir-nm-1_2/usercache/jenkins/appcache/application_1733152780087_0001/container_1733152780087_0001_01_000001/launch_container.sh] 2024-12-02T15:21:48,932 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_126270/yarn-6841854118/MiniMRCluster_126270-localDir-nm-1_2/usercache/jenkins/appcache/application_1733152780087_0001/container_1733152780087_0001_01_000001/container_tokens] 2024-12-02T15:21:48,933 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_126270/yarn-6841854118/MiniMRCluster_126270-localDir-nm-1_2/usercache/jenkins/appcache/application_1733152780087_0001/container_1733152780087_0001_01_000001/sysfs] 2024-12-02T15:21:49,471 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733152780087_0002_000001 (auth:SIMPLE) from 127.0.0.1:33470 2024-12-02T15:21:50,009 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-02T15:21:53,093 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithTargetName 2024-12-02T15:21:53,093 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithTargetName Metrics about Tables on a single HBase RegionServer 2024-12-02T15:21:53,094 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testExportFileSystemStateWithSplitRegion 2024-12-02T15:21:53,094 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithSplitRegion 2024-12-02T15:21:54,299 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733152780087_0002_000001 (auth:SIMPLE) from 127.0.0.1:43192 2024-12-02T15:21:54,489 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073741951_1127 (size=349786) 2024-12-02T15:21:54,489 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073741951_1127 (size=349786) 2024-12-02T15:21:54,489 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073741951_1127 (size=349786) 2024-12-02T15:21:56,722 INFO [Socket Reader 
#1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733152780087_0002_000001 (auth:SIMPLE) from 127.0.0.1:34234 2024-12-02T15:21:56,722 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733152780087_0002_000001 (auth:SIMPLE) from 127.0.0.1:48504 2024-12-02T15:21:58,598 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-02T15:22:01,776 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-02T15:22:03,278 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073741952_1128 (size=8054) 2024-12-02T15:22:03,278 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073741952_1128 (size=8054) 2024-12-02T15:22:03,282 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073741952_1128 (size=8054) 2024-12-02T15:22:03,439 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_126270/yarn-6841854118/MiniMRCluster_126270-localDir-nm-0_3/usercache/jenkins/appcache/application_1733152780087_0002/container_1733152780087_0002_01_000002/launch_container.sh] 2024-12-02T15:22:03,439 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_126270/yarn-6841854118/MiniMRCluster_126270-localDir-nm-0_3/usercache/jenkins/appcache/application_1733152780087_0002/container_1733152780087_0002_01_000002/container_tokens] 2024-12-02T15:22:03,439 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_126270/yarn-6841854118/MiniMRCluster_126270-localDir-nm-0_3/usercache/jenkins/appcache/application_1733152780087_0002/container_1733152780087_0002_01_000002/sysfs] 2024-12-02T15:22:04,481 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073741954_1130 (size=5566) 2024-12-02T15:22:04,481 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073741954_1130 (size=5566) 2024-12-02T15:22:04,481 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073741954_1130 (size=5566) 2024-12-02T15:22:04,534 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073741953_1129 (size=22157) 2024-12-02T15:22:04,535 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073741953_1129 (size=22157) 2024-12-02T15:22:04,535 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to 
blk_1073741953_1129 (size=22157) 2024-12-02T15:22:04,611 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_126270/yarn-6841854118/MiniMRCluster_126270-localDir-nm-1_0/usercache/jenkins/appcache/application_1733152780087_0002/container_1733152780087_0002_01_000003/launch_container.sh] 2024-12-02T15:22:04,611 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_126270/yarn-6841854118/MiniMRCluster_126270-localDir-nm-1_0/usercache/jenkins/appcache/application_1733152780087_0002/container_1733152780087_0002_01_000003/container_tokens] 2024-12-02T15:22:04,611 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_126270/yarn-6841854118/MiniMRCluster_126270-localDir-nm-1_0/usercache/jenkins/appcache/application_1733152780087_0002/container_1733152780087_0002_01_000003/sysfs] 2024-12-02T15:22:04,952 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073741955_1131 (size=465) 2024-12-02T15:22:04,953 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073741955_1131 (size=465) 2024-12-02T15:22:04,953 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073741955_1131 (size=465) 2024-12-02T15:22:04,980 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073741956_1132 (size=22157) 2024-12-02T15:22:04,981 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073741956_1132 (size=22157) 2024-12-02T15:22:04,981 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073741956_1132 (size=22157) 2024-12-02T15:22:04,999 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073741957_1133 (size=349786) 2024-12-02T15:22:04,999 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073741957_1133 (size=349786) 2024-12-02T15:22:04,999 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073741957_1133 (size=349786) 2024-12-02T15:22:05,014 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733152780087_0002_000001 (auth:SIMPLE) from 127.0.0.1:37576 2024-12-02T15:22:06,783 INFO [Time-limited test {}] snapshot.ExportSnapshot(1219): Finalize the Snapshot Export 2024-12-02T15:22:06,783 INFO [Time-limited test {}] snapshot.ExportSnapshot(1230): Verify the exported snapshot's expiration status and integrity. 
2024-12-02T15:22:06,791 INFO [Time-limited test {}] snapshot.ExportSnapshot(1236): Export Completed: testExportWithTargetName 2024-12-02T15:22:06,791 INFO [Time-limited test {}] snapshot.TestExportSnapshot(409): Exported snapshot 2024-12-02T15:22:06,791 INFO [Time-limited test {}] snapshot.TestExportSnapshot(420): Verified filesystem state 2024-12-02T15:22:06,792 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-205930424_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/.hbase-snapshot/snaptb0-testExportWithTargetName at hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/.hbase-snapshot/snaptb0-testExportWithTargetName 2024-12-02T15:22:06,792 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/.hbase-snapshot/snaptb0-testExportWithTargetName/.snapshotinfo 2024-12-02T15:22:06,792 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/.hbase-snapshot/snaptb0-testExportWithTargetName/data.manifest 2024-12-02T15:22:06,792 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-205930424_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/export-test/export-1733152906654/.hbase-snapshot/testExportWithTargetName at hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/export-test/export-1733152906654/.hbase-snapshot/testExportWithTargetName 2024-12-02T15:22:06,793 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/export-test/export-1733152906654/.hbase-snapshot/testExportWithTargetName/.snapshotinfo 2024-12-02T15:22:06,793 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/export-test/export-1733152906654/.hbase-snapshot/testExportWithTargetName/data.manifest 2024-12-02T15:22:06,801 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testExportWithTargetName 2024-12-02T15:22:06,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] procedure2.ProcedureExecutor(1139): Stored pid=56, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportWithTargetName 2024-12-02T15:22:06,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=56 2024-12-02T15:22:06,805 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithTargetName","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733152926804"}]},"ts":"1733152926804"} 2024-12-02T15:22:06,807 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithTargetName, state=DISABLING in hbase:meta 2024-12-02T15:22:06,807 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(284): Set testtb-testExportWithTargetName to state=DISABLING 2024-12-02T15:22:06,808 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized 
subprocedures=[{pid=57, ppid=56, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportWithTargetName}] 2024-12-02T15:22:06,809 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=58, ppid=57, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=d6bc8d8e61dabca25846dd8d50e1ea8f, UNASSIGN}, {pid=59, ppid=57, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=af7631d267d518d88b1ac02b61113820, UNASSIGN}] 2024-12-02T15:22:06,810 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=58, ppid=57, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=d6bc8d8e61dabca25846dd8d50e1ea8f, UNASSIGN 2024-12-02T15:22:06,810 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=59, ppid=57, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=af7631d267d518d88b1ac02b61113820, UNASSIGN 2024-12-02T15:22:06,811 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=58 updating hbase:meta row=d6bc8d8e61dabca25846dd8d50e1ea8f, regionState=CLOSING, regionLocation=be0458f36726,43025,1733152773677 2024-12-02T15:22:06,811 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=59 updating hbase:meta row=af7631d267d518d88b1ac02b61113820, regionState=CLOSING, regionLocation=be0458f36726,45405,1733152773787 2024-12-02T15:22:06,813 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=58, ppid=57, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=d6bc8d8e61dabca25846dd8d50e1ea8f, UNASSIGN because future has completed 2024-12-02T15:22:06,813 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-02T15:22:06,813 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=60, ppid=58, state=RUNNABLE, hasLock=false; CloseRegionProcedure d6bc8d8e61dabca25846dd8d50e1ea8f, server=be0458f36726,43025,1733152773677}] 2024-12-02T15:22:06,814 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=59, ppid=57, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=af7631d267d518d88b1ac02b61113820, UNASSIGN because future has completed 2024-12-02T15:22:06,814 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-02T15:22:06,814 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=61, ppid=59, state=RUNNABLE, hasLock=false; CloseRegionProcedure af7631d267d518d88b1ac02b61113820, server=be0458f36726,45405,1733152773787}] 2024-12-02T15:22:06,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=56 2024-12-02T15:22:06,966 INFO [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] 
handler.UnassignRegionHandler(122): Close d6bc8d8e61dabca25846dd8d50e1ea8f 2024-12-02T15:22:06,966 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-02T15:22:06,966 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.HRegion(1722): Closing d6bc8d8e61dabca25846dd8d50e1ea8f, disabling compactions & flushes 2024-12-02T15:22:06,966 INFO [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.HRegion(1755): Closing region testtb-testExportWithTargetName,,1733152905287.d6bc8d8e61dabca25846dd8d50e1ea8f. 2024-12-02T15:22:06,966 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithTargetName,,1733152905287.d6bc8d8e61dabca25846dd8d50e1ea8f. 2024-12-02T15:22:06,966 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithTargetName,,1733152905287.d6bc8d8e61dabca25846dd8d50e1ea8f. after waiting 0 ms 2024-12-02T15:22:06,966 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithTargetName,,1733152905287.d6bc8d8e61dabca25846dd8d50e1ea8f. 2024-12-02T15:22:06,968 INFO [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] handler.UnassignRegionHandler(122): Close af7631d267d518d88b1ac02b61113820 2024-12-02T15:22:06,968 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-02T15:22:06,968 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] regionserver.HRegion(1722): Closing af7631d267d518d88b1ac02b61113820, disabling compactions & flushes 2024-12-02T15:22:06,968 INFO [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] regionserver.HRegion(1755): Closing region testtb-testExportWithTargetName,1,1733152905287.af7631d267d518d88b1ac02b61113820. 2024-12-02T15:22:06,968 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithTargetName,1,1733152905287.af7631d267d518d88b1ac02b61113820. 2024-12-02T15:22:06,968 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithTargetName,1,1733152905287.af7631d267d518d88b1ac02b61113820. after waiting 0 ms 2024-12-02T15:22:06,968 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithTargetName,1,1733152905287.af7631d267d518d88b1ac02b61113820. 
2024-12-02T15:22:06,971 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportWithTargetName/d6bc8d8e61dabca25846dd8d50e1ea8f/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-02T15:22:06,972 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-02T15:22:06,972 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportWithTargetName/af7631d267d518d88b1ac02b61113820/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-02T15:22:06,972 INFO [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.HRegion(1973): Closed testtb-testExportWithTargetName,,1733152905287.d6bc8d8e61dabca25846dd8d50e1ea8f. 2024-12-02T15:22:06,972 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.HRegion(1676): Region close journal for d6bc8d8e61dabca25846dd8d50e1ea8f: Waiting for close lock at 1733152926966Running coprocessor pre-close hooks at 1733152926966Disabling compacts and flushes for region at 1733152926966Disabling writes for close at 1733152926966Writing region close event to WAL at 1733152926967 (+1 ms)Running coprocessor post-close hooks at 1733152926972 (+5 ms)Closed at 1733152926972 2024-12-02T15:22:06,973 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-02T15:22:06,973 INFO [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] regionserver.HRegion(1973): Closed testtb-testExportWithTargetName,1,1733152905287.af7631d267d518d88b1ac02b61113820. 
2024-12-02T15:22:06,973 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] regionserver.HRegion(1676): Region close journal for af7631d267d518d88b1ac02b61113820: Waiting for close lock at 1733152926968Running coprocessor pre-close hooks at 1733152926968Disabling compacts and flushes for region at 1733152926968Disabling writes for close at 1733152926968Writing region close event to WAL at 1733152926969 (+1 ms)Running coprocessor post-close hooks at 1733152926972 (+3 ms)Closed at 1733152926973 (+1 ms) 2024-12-02T15:22:06,974 INFO [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] handler.UnassignRegionHandler(157): Closed d6bc8d8e61dabca25846dd8d50e1ea8f 2024-12-02T15:22:06,975 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=58 updating hbase:meta row=d6bc8d8e61dabca25846dd8d50e1ea8f, regionState=CLOSED 2024-12-02T15:22:06,975 INFO [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] handler.UnassignRegionHandler(157): Closed af7631d267d518d88b1ac02b61113820 2024-12-02T15:22:06,975 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=59 updating hbase:meta row=af7631d267d518d88b1ac02b61113820, regionState=CLOSED 2024-12-02T15:22:06,976 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=60, ppid=58, state=RUNNABLE, hasLock=false; CloseRegionProcedure d6bc8d8e61dabca25846dd8d50e1ea8f, server=be0458f36726,43025,1733152773677 because future has completed 2024-12-02T15:22:06,977 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=61, ppid=59, state=RUNNABLE, hasLock=false; CloseRegionProcedure af7631d267d518d88b1ac02b61113820, server=be0458f36726,45405,1733152773787 because future has completed 2024-12-02T15:22:06,979 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=60, resume processing ppid=58 2024-12-02T15:22:06,979 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=60, ppid=58, state=SUCCESS, hasLock=false; CloseRegionProcedure d6bc8d8e61dabca25846dd8d50e1ea8f, server=be0458f36726,43025,1733152773677 in 164 msec 2024-12-02T15:22:06,980 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=61, resume processing ppid=59 2024-12-02T15:22:06,980 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=61, ppid=59, state=SUCCESS, hasLock=false; CloseRegionProcedure af7631d267d518d88b1ac02b61113820, server=be0458f36726,45405,1733152773787 in 164 msec 2024-12-02T15:22:06,980 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=58, ppid=57, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=d6bc8d8e61dabca25846dd8d50e1ea8f, UNASSIGN in 170 msec 2024-12-02T15:22:06,981 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=59, resume processing ppid=57 2024-12-02T15:22:06,981 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=59, ppid=57, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=af7631d267d518d88b1ac02b61113820, UNASSIGN in 171 msec 2024-12-02T15:22:06,983 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=57, resume processing ppid=56 2024-12-02T15:22:06,983 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished 
pid=57, ppid=56, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportWithTargetName in 173 msec 2024-12-02T15:22:06,984 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithTargetName","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733152926984"}]},"ts":"1733152926984"} 2024-12-02T15:22:06,986 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithTargetName, state=DISABLED in hbase:meta 2024-12-02T15:22:06,986 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(296): Set testtb-testExportWithTargetName to state=DISABLED 2024-12-02T15:22:06,988 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=56, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportWithTargetName in 185 msec 2024-12-02T15:22:07,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=56 2024-12-02T15:22:07,125 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testExportWithTargetName completed 2024-12-02T15:22:07,127 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testExportWithTargetName 2024-12-02T15:22:07,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] procedure2.ProcedureExecutor(1139): Stored pid=62, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportWithTargetName 2024-12-02T15:22:07,132 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=62, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportWithTargetName 2024-12-02T15:22:07,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportWithTargetName 2024-12-02T15:22:07,133 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=62, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportWithTargetName 2024-12-02T15:22:07,137 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43025 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportWithTargetName 2024-12-02T15:22:07,139 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportWithTargetName/d6bc8d8e61dabca25846dd8d50e1ea8f 2024-12-02T15:22:07,139 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportWithTargetName/af7631d267d518d88b1ac02b61113820 2024-12-02T15:22:07,141 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportWithTargetName/af7631d267d518d88b1ac02b61113820/cf, FileablePath, hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportWithTargetName/af7631d267d518d88b1ac02b61113820/recovered.edits] 2024-12-02T15:22:07,141 DEBUG 
[HFileArchiver-6 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportWithTargetName/d6bc8d8e61dabca25846dd8d50e1ea8f/cf, FileablePath, hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportWithTargetName/d6bc8d8e61dabca25846dd8d50e1ea8f/recovered.edits] 2024-12-02T15:22:07,145 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportWithTargetName/af7631d267d518d88b1ac02b61113820/cf/af683d2283ad46818e1ed9e08001febc to hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/archive/data/default/testtb-testExportWithTargetName/af7631d267d518d88b1ac02b61113820/cf/af683d2283ad46818e1ed9e08001febc 2024-12-02T15:22:07,145 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportWithTargetName/d6bc8d8e61dabca25846dd8d50e1ea8f/cf/bbc593ce93cf41d6bc5d6fc019bb02bb to hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/archive/data/default/testtb-testExportWithTargetName/d6bc8d8e61dabca25846dd8d50e1ea8f/cf/bbc593ce93cf41d6bc5d6fc019bb02bb 2024-12-02T15:22:07,172 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportWithTargetName/af7631d267d518d88b1ac02b61113820/recovered.edits/9.seqid to hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/archive/data/default/testtb-testExportWithTargetName/af7631d267d518d88b1ac02b61113820/recovered.edits/9.seqid 2024-12-02T15:22:07,173 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportWithTargetName/d6bc8d8e61dabca25846dd8d50e1ea8f/recovered.edits/9.seqid to hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/archive/data/default/testtb-testExportWithTargetName/d6bc8d8e61dabca25846dd8d50e1ea8f/recovered.edits/9.seqid 2024-12-02T15:22:07,174 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportWithTargetName/af7631d267d518d88b1ac02b61113820 2024-12-02T15:22:07,174 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportWithTargetName/d6bc8d8e61dabca25846dd8d50e1ea8f 2024-12-02T15:22:07,175 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportWithTargetName regions 2024-12-02T15:22:07,179 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=62, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportWithTargetName 2024-12-02T15:22:07,184 WARN [PEWorker-2 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testExportWithTargetName from hbase:meta 2024-12-02T15:22:07,188 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(407): Removing 
'testtb-testExportWithTargetName' descriptor. 2024-12-02T15:22:07,189 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=62, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportWithTargetName 2024-12-02T15:22:07,189 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testExportWithTargetName' from region states. 2024-12-02T15:22:07,190 DEBUG [PEWorker-2 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportWithTargetName,,1733152905287.d6bc8d8e61dabca25846dd8d50e1ea8f.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733152927189"}]},"ts":"9223372036854775807"} 2024-12-02T15:22:07,190 DEBUG [PEWorker-2 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportWithTargetName,1,1733152905287.af7631d267d518d88b1ac02b61113820.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733152927189"}]},"ts":"9223372036854775807"} 2024-12-02T15:22:07,192 INFO [PEWorker-2 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-12-02T15:22:07,192 DEBUG [PEWorker-2 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => d6bc8d8e61dabca25846dd8d50e1ea8f, NAME => 'testtb-testExportWithTargetName,,1733152905287.d6bc8d8e61dabca25846dd8d50e1ea8f.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => af7631d267d518d88b1ac02b61113820, NAME => 'testtb-testExportWithTargetName,1,1733152905287.af7631d267d518d88b1ac02b61113820.', STARTKEY => '1', ENDKEY => ''}] 2024-12-02T15:22:07,192 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testExportWithTargetName' as deleted. 2024-12-02T15:22:07,192 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportWithTargetName","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733152927192"}]},"ts":"9223372036854775807"} 2024-12-02T15:22:07,194 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportWithTargetName state from META 2024-12-02T15:22:07,195 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(133): Finished pid=62, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportWithTargetName 2024-12-02T15:22:07,196 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=62, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportWithTargetName in 68 msec 2024-12-02T15:22:07,211 DEBUG [pool-69-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45405-0x10197ce54a90002, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-12-02T15:22:07,211 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35155-0x10197ce54a90003, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-12-02T15:22:07,211 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43025-0x10197ce54a90001, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-12-02T15:22:07,211 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34441-0x10197ce54a90000, quorum=127.0.0.1:55609, 
baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-12-02T15:22:07,212 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF 2024-12-02T15:22:07,212 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF 2024-12-02T15:22:07,212 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF 2024-12-02T15:22:07,212 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF 2024-12-02T15:22:07,220 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43025-0x10197ce54a90001, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-12-02T15:22:07,220 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34441-0x10197ce54a90000, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-12-02T15:22:07,220 DEBUG [pool-69-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45405-0x10197ce54a90002, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-12-02T15:22:07,220 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35155-0x10197ce54a90003, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-12-02T15:22:07,220 DEBUG [pool-69-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45405-0x10197ce54a90002, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T15:22:07,220 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34441-0x10197ce54a90000, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T15:22:07,220 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35155-0x10197ce54a90003, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T15:22:07,220 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43025-0x10197ce54a90001, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T15:22:07,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=62 2024-12-02T15:22:07,221 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportWithTargetName 2024-12-02T15:22:07,221 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportWithTargetName completed 2024-12-02T15:22:07,228 INFO 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportWithTargetName" type: DISABLED 2024-12-02T15:22:07,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testExportWithTargetName 2024-12-02T15:22:07,233 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportWithTargetName" type: DISABLED 2024-12-02T15:22:07,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportWithTargetName 2024-12-02T15:22:07,261 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestSecureExportSnapshot#testExportWithTargetName Thread=796 (was 769) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-14 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: process reaper (pid 114130) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (834363022) connection to localhost/127.0.0.1:38003 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: Thread-2024 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) 
java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: ApplicationMasterLauncher #1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-7 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-205930424_22 at /127.0.0.1:52488 [Waiting for operation #4] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #1 for volume 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/44e720fa-d04f-d823-6564-55fab94a4e60/cluster_91cb4ebe-02bf-547b-91b0-e80db118e0cc/data/data6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-205930424_22 at /127.0.0.1:36288 [Waiting for operation #4] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #1 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/44e720fa-d04f-d823-6564-55fab94a4e60/cluster_91cb4ebe-02bf-547b-91b0-e80db118e0cc/data/data4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_126171488_1 at /127.0.0.1:52464 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool.commonPool-worker-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:38003 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-205930424_22 at /127.0.0.1:37484 [Waiting for operation #4] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (834363022) connection to localhost/127.0.0.1:35107 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: Async disk worker #1 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/44e720fa-d04f-d823-6564-55fab94a4e60/cluster_91cb4ebe-02bf-547b-91b0-e80db118e0cc/data/data5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #1 for volume 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/44e720fa-d04f-d823-6564-55fab94a4e60/cluster_91cb4ebe-02bf-547b-91b0-e80db118e0cc/data/data2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #1 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/44e720fa-d04f-d823-6564-55fab94a4e60/cluster_91cb4ebe-02bf-547b-91b0-e80db118e0cc/data/data3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:35107 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LogDeleter #0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:42461 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #1 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/44e720fa-d04f-d823-6564-55fab94a4e60/cluster_91cb4ebe-02bf-547b-91b0-e80db118e0cc/data/data1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=810 (was 797) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=620 (was 553) - SystemLoadAverage LEAK? 
-, ProcessCount=17 (was 17), AvailableMemoryMB=2175 (was 561) - AvailableMemoryMB LEAK? - 2024-12-02T15:22:07,261 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=796 is superior to 500 2024-12-02T15:22:07,280 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestSecureExportSnapshot#testExportWithResetTtl Thread=796, OpenFileDescriptor=810, MaxFileDescriptor=1048576, SystemLoadAverage=620, ProcessCount=17, AvailableMemoryMB=2175 2024-12-02T15:22:07,280 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=796 is superior to 500 2024-12-02T15:22:07,282 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testExportWithResetTtl', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-02T15:22:07,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] procedure2.ProcedureExecutor(1139): Stored pid=63, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportWithResetTtl 2024-12-02T15:22:07,284 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=63, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportWithResetTtl execute state=CREATE_TABLE_PRE_OPERATION 2024-12-02T15:22:07,284 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T15:22:07,284 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportWithResetTtl" procId is: 63 2024-12-02T15:22:07,285 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=63, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportWithResetTtl execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-02T15:22:07,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=63 2024-12-02T15:22:07,294 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073741958_1134 (size=404) 2024-12-02T15:22:07,295 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073741958_1134 (size=404) 2024-12-02T15:22:07,295 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073741958_1134 (size=404) 2024-12-02T15:22:07,296 INFO [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 04be54dd4f37edf3659f725503c71f38, NAME => 'testtb-testExportWithResetTtl,,1733152927282.04be54dd4f37edf3659f725503c71f38.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportWithResetTtl', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => 
'1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657 2024-12-02T15:22:07,303 INFO [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => 552215209d1542254b303f0cef0c7638, NAME => 'testtb-testExportWithResetTtl,1,1733152927282.552215209d1542254b303f0cef0c7638.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportWithResetTtl', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657 2024-12-02T15:22:07,312 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073741959_1135 (size=65) 2024-12-02T15:22:07,312 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073741959_1135 (size=65) 2024-12-02T15:22:07,313 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073741959_1135 (size=65) 2024-12-02T15:22:07,313 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportWithResetTtl,,1733152927282.04be54dd4f37edf3659f725503c71f38.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T15:22:07,313 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1722): Closing 04be54dd4f37edf3659f725503c71f38, disabling compactions & flushes 2024-12-02T15:22:07,313 INFO [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportWithResetTtl,,1733152927282.04be54dd4f37edf3659f725503c71f38. 2024-12-02T15:22:07,313 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithResetTtl,,1733152927282.04be54dd4f37edf3659f725503c71f38. 2024-12-02T15:22:07,313 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithResetTtl,,1733152927282.04be54dd4f37edf3659f725503c71f38. after waiting 0 ms 2024-12-02T15:22:07,313 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithResetTtl,,1733152927282.04be54dd4f37edf3659f725503c71f38. 2024-12-02T15:22:07,314 INFO [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportWithResetTtl,,1733152927282.04be54dd4f37edf3659f725503c71f38. 
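For reference, the create request and column-family descriptor logged above for 'testtb-testExportWithResetTtl' correspond roughly to the following HBase client call. This is a minimal sketch assuming an already configured Connection to the test cluster; it is not the test's actual code, and only the table name, the 'cf' family, the single version, and the split at row key '1' are taken from the log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateExportTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("testtb-testExportWithResetTtl");
      TableDescriptor desc = TableDescriptorBuilder.newBuilder(table)
          .setRegionReplication(1)                       // TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}
          .setColumnFamily(ColumnFamilyDescriptorBuilder
              .newBuilder(Bytes.toBytes("cf"))
              .setMaxVersions(1)                         // VERSIONS => '1'
              .build())
          .build();
      // Split at row key "1" so two regions are created: ('', '1') and ('1', ''),
      // matching the two RegionOpenAndInit entries in the log.
      admin.createTable(desc, new byte[][] { Bytes.toBytes("1") });
    }
  }
}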
2024-12-02T15:22:07,314 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1676): Region close journal for 04be54dd4f37edf3659f725503c71f38: Waiting for close lock at 1733152927313Disabling compacts and flushes for region at 1733152927313Disabling writes for close at 1733152927313Writing region close event to WAL at 1733152927314 (+1 ms)Closed at 1733152927314 2024-12-02T15:22:07,318 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073741960_1136 (size=65) 2024-12-02T15:22:07,318 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073741960_1136 (size=65) 2024-12-02T15:22:07,318 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073741960_1136 (size=65) 2024-12-02T15:22:07,319 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportWithResetTtl,1,1733152927282.552215209d1542254b303f0cef0c7638.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T15:22:07,319 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1722): Closing 552215209d1542254b303f0cef0c7638, disabling compactions & flushes 2024-12-02T15:22:07,319 INFO [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportWithResetTtl,1,1733152927282.552215209d1542254b303f0cef0c7638. 2024-12-02T15:22:07,319 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithResetTtl,1,1733152927282.552215209d1542254b303f0cef0c7638. 2024-12-02T15:22:07,319 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithResetTtl,1,1733152927282.552215209d1542254b303f0cef0c7638. after waiting 0 ms 2024-12-02T15:22:07,319 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithResetTtl,1,1733152927282.552215209d1542254b303f0cef0c7638. 2024-12-02T15:22:07,319 INFO [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportWithResetTtl,1,1733152927282.552215209d1542254b303f0cef0c7638. 
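The earlier part of this section (the DeleteTableProcedure for testtb-testExportWithTargetName and the SnapshotManager entries removing emptySnaptb0-/snaptb0-testExportWithTargetName) is the cleanup of the previous test. A rough client-side equivalent of that cleanup follows; this is a hedged sketch assuming an open Connection, not the harness's actual helper methods.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;

public class DropExportTableSketch {
  // Disable and delete the test table, then remove its snapshots by name.
  static void cleanup(Connection conn) throws Exception {
    try (Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("testtb-testExportWithTargetName");
      if (admin.tableExists(table)) {
        if (admin.isTableEnabled(table)) {
          admin.disableTable(table);   // a table must be disabled before deletion
        }
        admin.deleteTable(table);      // drives the DeleteTableProcedure seen above
      }
      admin.deleteSnapshot("emptySnaptb0-testExportWithTargetName");
      admin.deleteSnapshot("snaptb0-testExportWithTargetName");
    }
  }
}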
2024-12-02T15:22:07,319 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1676): Region close journal for 552215209d1542254b303f0cef0c7638: Waiting for close lock at 1733152927319Disabling compacts and flushes for region at 1733152927319Disabling writes for close at 1733152927319Writing region close event to WAL at 1733152927319Closed at 1733152927319 2024-12-02T15:22:07,320 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=63, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportWithResetTtl execute state=CREATE_TABLE_ADD_TO_META 2024-12-02T15:22:07,321 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportWithResetTtl,,1733152927282.04be54dd4f37edf3659f725503c71f38.","families":{"info":[{"qualifier":"regioninfo","vlen":64,"tag":[],"timestamp":"1733152927320"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733152927320"}]},"ts":"1733152927320"} 2024-12-02T15:22:07,321 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportWithResetTtl,1,1733152927282.552215209d1542254b303f0cef0c7638.","families":{"info":[{"qualifier":"regioninfo","vlen":64,"tag":[],"timestamp":"1733152927320"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733152927320"}]},"ts":"1733152927320"} 2024-12-02T15:22:07,323 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-12-02T15:22:07,324 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=63, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportWithResetTtl execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-02T15:22:07,324 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733152927324"}]},"ts":"1733152927324"} 2024-12-02T15:22:07,326 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithResetTtl, state=ENABLING in hbase:meta 2024-12-02T15:22:07,326 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(204): Hosts are {be0458f36726=0} racks are {/default-rack=0} 2024-12-02T15:22:07,328 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-02T15:22:07,328 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-02T15:22:07,328 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-02T15:22:07,328 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-02T15:22:07,328 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-02T15:22:07,328 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-02T15:22:07,328 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-02T15:22:07,328 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-02T15:22:07,328 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-02T15:22:07,328 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-02T15:22:07,328 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=64, ppid=63, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; 
TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=04be54dd4f37edf3659f725503c71f38, ASSIGN}, {pid=65, ppid=63, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=552215209d1542254b303f0cef0c7638, ASSIGN}] 2024-12-02T15:22:07,334 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=65, ppid=63, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=552215209d1542254b303f0cef0c7638, ASSIGN 2024-12-02T15:22:07,334 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=64, ppid=63, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=04be54dd4f37edf3659f725503c71f38, ASSIGN 2024-12-02T15:22:07,335 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=64, ppid=63, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=04be54dd4f37edf3659f725503c71f38, ASSIGN; state=OFFLINE, location=be0458f36726,43025,1733152773677; forceNewPlan=false, retain=false 2024-12-02T15:22:07,335 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=65, ppid=63, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=552215209d1542254b303f0cef0c7638, ASSIGN; state=OFFLINE, location=be0458f36726,35155,1733152773848; forceNewPlan=false, retain=false 2024-12-02T15:22:07,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=63 2024-12-02T15:22:07,485 INFO [be0458f36726:34441 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
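The balancer entry above places the two new regions on the region servers at ports 43025 and 35155, and the OpenRegionProcedures that follow carry out those assignments. Once the table is online, the resulting placement can be checked from the client with a RegionLocator. A small sketch, assuming an open Connection; the printed format is illustrative.

import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.RegionLocator;

public class ShowRegionPlacementSketch {
  static void printPlacement(Connection conn) throws Exception {
    TableName table = TableName.valueOf("testtb-testExportWithResetTtl");
    try (RegionLocator locator = conn.getRegionLocator(table)) {
      // One HRegionLocation per region; for this table that is the ('', '1')
      // and ('1', '') regions assigned by the balancer above.
      for (HRegionLocation loc : locator.getAllRegionLocations()) {
        System.out.println(loc.getRegion().getEncodedName() + " -> " + loc.getServerName());
      }
    }
  }
}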
2024-12-02T15:22:07,486 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=65 updating hbase:meta row=552215209d1542254b303f0cef0c7638, regionState=OPENING, regionLocation=be0458f36726,35155,1733152773848 2024-12-02T15:22:07,486 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=64 updating hbase:meta row=04be54dd4f37edf3659f725503c71f38, regionState=OPENING, regionLocation=be0458f36726,43025,1733152773677 2024-12-02T15:22:07,488 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=64, ppid=63, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=04be54dd4f37edf3659f725503c71f38, ASSIGN because future has completed 2024-12-02T15:22:07,488 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=66, ppid=64, state=RUNNABLE, hasLock=false; OpenRegionProcedure 04be54dd4f37edf3659f725503c71f38, server=be0458f36726,43025,1733152773677}] 2024-12-02T15:22:07,489 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=65, ppid=63, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=552215209d1542254b303f0cef0c7638, ASSIGN because future has completed 2024-12-02T15:22:07,489 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=67, ppid=65, state=RUNNABLE, hasLock=false; OpenRegionProcedure 552215209d1542254b303f0cef0c7638, server=be0458f36726,35155,1733152773848}] 2024-12-02T15:22:07,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=63 2024-12-02T15:22:07,644 INFO [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] handler.AssignRegionHandler(132): Open testtb-testExportWithResetTtl,,1733152927282.04be54dd4f37edf3659f725503c71f38. 2024-12-02T15:22:07,644 INFO [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] handler.AssignRegionHandler(132): Open testtb-testExportWithResetTtl,1,1733152927282.552215209d1542254b303f0cef0c7638. 2024-12-02T15:22:07,644 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(7752): Opening region: {ENCODED => 552215209d1542254b303f0cef0c7638, NAME => 'testtb-testExportWithResetTtl,1,1733152927282.552215209d1542254b303f0cef0c7638.', STARTKEY => '1', ENDKEY => ''} 2024-12-02T15:22:07,644 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(7752): Opening region: {ENCODED => 04be54dd4f37edf3659f725503c71f38, NAME => 'testtb-testExportWithResetTtl,,1733152927282.04be54dd4f37edf3659f725503c71f38.', STARTKEY => '', ENDKEY => '1'} 2024-12-02T15:22:07,644 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportWithResetTtl,,1733152927282.04be54dd4f37edf3659f725503c71f38. service=AccessControlService 2024-12-02T15:22:07,644 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportWithResetTtl,1,1733152927282.552215209d1542254b303f0cef0c7638. 
service=AccessControlService 2024-12-02T15:22:07,645 INFO [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-02T15:22:07,645 INFO [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-02T15:22:07,645 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithResetTtl 04be54dd4f37edf3659f725503c71f38 2024-12-02T15:22:07,645 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithResetTtl 552215209d1542254b303f0cef0c7638 2024-12-02T15:22:07,645 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(898): Instantiated testtb-testExportWithResetTtl,,1733152927282.04be54dd4f37edf3659f725503c71f38.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T15:22:07,645 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(898): Instantiated testtb-testExportWithResetTtl,1,1733152927282.552215209d1542254b303f0cef0c7638.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T15:22:07,645 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(7794): checking encryption for 04be54dd4f37edf3659f725503c71f38 2024-12-02T15:22:07,645 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(7794): checking encryption for 552215209d1542254b303f0cef0c7638 2024-12-02T15:22:07,645 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(7797): checking classloading for 04be54dd4f37edf3659f725503c71f38 2024-12-02T15:22:07,645 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(7797): checking classloading for 552215209d1542254b303f0cef0c7638 2024-12-02T15:22:07,647 INFO [StoreOpener-552215209d1542254b303f0cef0c7638-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 552215209d1542254b303f0cef0c7638 2024-12-02T15:22:07,647 INFO [StoreOpener-04be54dd4f37edf3659f725503c71f38-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 04be54dd4f37edf3659f725503c71f38 2024-12-02T15:22:07,648 INFO [StoreOpener-552215209d1542254b303f0cef0c7638-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files 
[minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 552215209d1542254b303f0cef0c7638 columnFamilyName cf 2024-12-02T15:22:07,648 INFO [StoreOpener-04be54dd4f37edf3659f725503c71f38-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 04be54dd4f37edf3659f725503c71f38 columnFamilyName cf 2024-12-02T15:22:07,648 DEBUG [StoreOpener-552215209d1542254b303f0cef0c7638-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T15:22:07,648 DEBUG [StoreOpener-04be54dd4f37edf3659f725503c71f38-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T15:22:07,649 INFO [StoreOpener-552215209d1542254b303f0cef0c7638-1 {}] regionserver.HStore(327): Store=552215209d1542254b303f0cef0c7638/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T15:22:07,649 INFO [StoreOpener-04be54dd4f37edf3659f725503c71f38-1 {}] regionserver.HStore(327): Store=04be54dd4f37edf3659f725503c71f38/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T15:22:07,649 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(1038): replaying wal for 04be54dd4f37edf3659f725503c71f38 2024-12-02T15:22:07,649 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(1038): replaying wal for 552215209d1542254b303f0cef0c7638 2024-12-02T15:22:07,650 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportWithResetTtl/552215209d1542254b303f0cef0c7638 2024-12-02T15:22:07,650 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportWithResetTtl/04be54dd4f37edf3659f725503c71f38 2024-12-02T15:22:07,650 DEBUG 
[RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportWithResetTtl/04be54dd4f37edf3659f725503c71f38 2024-12-02T15:22:07,650 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportWithResetTtl/552215209d1542254b303f0cef0c7638 2024-12-02T15:22:07,650 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(1048): stopping wal replay for 04be54dd4f37edf3659f725503c71f38 2024-12-02T15:22:07,650 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(1048): stopping wal replay for 552215209d1542254b303f0cef0c7638 2024-12-02T15:22:07,650 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(1060): Cleaning up temporary data for 04be54dd4f37edf3659f725503c71f38 2024-12-02T15:22:07,650 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(1060): Cleaning up temporary data for 552215209d1542254b303f0cef0c7638 2024-12-02T15:22:07,652 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(1093): writing seq id for 04be54dd4f37edf3659f725503c71f38 2024-12-02T15:22:07,652 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(1093): writing seq id for 552215209d1542254b303f0cef0c7638 2024-12-02T15:22:07,654 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportWithResetTtl/04be54dd4f37edf3659f725503c71f38/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-02T15:22:07,654 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportWithResetTtl/552215209d1542254b303f0cef0c7638/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-02T15:22:07,654 INFO [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(1114): Opened 04be54dd4f37edf3659f725503c71f38; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=58940172, jitterRate=-0.1217229962348938}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-02T15:22:07,654 INFO [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(1114): Opened 552215209d1542254b303f0cef0c7638; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=64134951, jitterRate=-0.044314756989479065}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-02T15:22:07,655 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] 
regionserver.HRegion(1122): Running coprocessor post-open hooks for 552215209d1542254b303f0cef0c7638 2024-12-02T15:22:07,655 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 04be54dd4f37edf3659f725503c71f38 2024-12-02T15:22:07,655 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(1006): Region open journal for 04be54dd4f37edf3659f725503c71f38: Running coprocessor pre-open hook at 1733152927645Writing region info on filesystem at 1733152927645Initializing all the Stores at 1733152927646 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733152927646Cleaning up temporary data from old regions at 1733152927650 (+4 ms)Running coprocessor post-open hooks at 1733152927655 (+5 ms)Region opened successfully at 1733152927655 2024-12-02T15:22:07,655 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(1006): Region open journal for 552215209d1542254b303f0cef0c7638: Running coprocessor pre-open hook at 1733152927645Writing region info on filesystem at 1733152927645Initializing all the Stores at 1733152927646 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733152927646Cleaning up temporary data from old regions at 1733152927650 (+4 ms)Running coprocessor post-open hooks at 1733152927655 (+5 ms)Region opened successfully at 1733152927655 2024-12-02T15:22:07,656 INFO [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportWithResetTtl,1,1733152927282.552215209d1542254b303f0cef0c7638., pid=67, masterSystemTime=1733152927640 2024-12-02T15:22:07,656 INFO [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportWithResetTtl,,1733152927282.04be54dd4f37edf3659f725503c71f38., pid=66, masterSystemTime=1733152927640 2024-12-02T15:22:07,658 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportWithResetTtl,,1733152927282.04be54dd4f37edf3659f725503c71f38. 2024-12-02T15:22:07,658 INFO [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] handler.AssignRegionHandler(153): Opened testtb-testExportWithResetTtl,,1733152927282.04be54dd4f37edf3659f725503c71f38. 
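The region-open journals above describe a single column family 'cf' (VERSIONS => '1', BLOOMFILTER => 'ROW', BLOCKSIZE => '65536') and two regions whose start keys are '' and '1'. A minimal sketch of creating an equivalent table through the public Admin API follows; the table name, family attributes and split key are read off the log, while the connection setup and class name are assumptions for illustration only.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateTestTableSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableName table = TableName.valueOf("testtb-testExportWithResetTtl");
          // Column family attributes mirroring the region-open journal above.
          ColumnFamilyDescriptor cf = ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
              .setMaxVersions(1)                  // VERSIONS => '1'
              .setBloomFilterType(BloomType.ROW)  // BLOOMFILTER => 'ROW'
              .setBlocksize(64 * 1024)            // BLOCKSIZE => '65536'
              .build();
          // One split key ('1') yields the two regions seen in the log
          // (start keys '' and '1').
          byte[][] splitKeys = new byte[][] { Bytes.toBytes("1") };
          admin.createTable(
              TableDescriptorBuilder.newBuilder(table).setColumnFamily(cf).build(),
              splitKeys);
        }
      }
    }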
2024-12-02T15:22:07,658 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=64 updating hbase:meta row=04be54dd4f37edf3659f725503c71f38, regionState=OPEN, openSeqNum=2, regionLocation=be0458f36726,43025,1733152773677 2024-12-02T15:22:07,658 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportWithResetTtl,1,1733152927282.552215209d1542254b303f0cef0c7638. 2024-12-02T15:22:07,658 INFO [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] handler.AssignRegionHandler(153): Opened testtb-testExportWithResetTtl,1,1733152927282.552215209d1542254b303f0cef0c7638. 2024-12-02T15:22:07,659 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=65 updating hbase:meta row=552215209d1542254b303f0cef0c7638, regionState=OPEN, openSeqNum=2, regionLocation=be0458f36726,35155,1733152773848 2024-12-02T15:22:07,660 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=66, ppid=64, state=RUNNABLE, hasLock=false; OpenRegionProcedure 04be54dd4f37edf3659f725503c71f38, server=be0458f36726,43025,1733152773677 because future has completed 2024-12-02T15:22:07,661 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=67, ppid=65, state=RUNNABLE, hasLock=false; OpenRegionProcedure 552215209d1542254b303f0cef0c7638, server=be0458f36726,35155,1733152773848 because future has completed 2024-12-02T15:22:07,662 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=66, resume processing ppid=64 2024-12-02T15:22:07,662 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=66, ppid=64, state=SUCCESS, hasLock=false; OpenRegionProcedure 04be54dd4f37edf3659f725503c71f38, server=be0458f36726,43025,1733152773677 in 173 msec 2024-12-02T15:22:07,663 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=67, resume processing ppid=65 2024-12-02T15:22:07,663 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=64, ppid=63, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=04be54dd4f37edf3659f725503c71f38, ASSIGN in 334 msec 2024-12-02T15:22:07,663 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=67, ppid=65, state=SUCCESS, hasLock=false; OpenRegionProcedure 552215209d1542254b303f0cef0c7638, server=be0458f36726,35155,1733152773848 in 173 msec 2024-12-02T15:22:07,665 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=65, resume processing ppid=63 2024-12-02T15:22:07,665 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=65, ppid=63, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=552215209d1542254b303f0cef0c7638, ASSIGN in 335 msec 2024-12-02T15:22:07,665 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=63, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportWithResetTtl execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-02T15:22:07,665 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733152927665"}]},"ts":"1733152927665"} 2024-12-02T15:22:07,667 INFO [PEWorker-3 {}] 
hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithResetTtl, state=ENABLED in hbase:meta 2024-12-02T15:22:07,668 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=63, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportWithResetTtl execute state=CREATE_TABLE_POST_OPERATION 2024-12-02T15:22:07,668 DEBUG [PEWorker-3 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportWithResetTtl jenkins: RWXCA 2024-12-02T15:22:07,671 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43025 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithResetTtl], kv [jenkins: RWXCA] 2024-12-02T15:22:07,703 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35155-0x10197ce54a90003, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T15:22:07,703 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34441-0x10197ce54a90000, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T15:22:07,703 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43025-0x10197ce54a90001, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T15:22:07,703 DEBUG [pool-69-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45405-0x10197ce54a90002, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T15:22:07,712 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-02T15:22:07,712 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-02T15:22:07,712 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-02T15:22:07,712 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-02T15:22:07,713 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=63, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportWithResetTtl in 430 msec 2024-12-02T15:22:07,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=63 2024-12-02T15:22:07,913 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportWithResetTtl completed 2024-12-02T15:22:07,914 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting 
until all regions of table testtb-testExportWithResetTtl get assigned. Timeout = 60000ms 2024-12-02T15:22:07,914 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-02T15:22:07,919 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table testtb-testExportWithResetTtl assigned to meta. Checking AM states. 2024-12-02T15:22:07,919 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-02T15:22:07,919 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table testtb-testExportWithResetTtl assigned. 2024-12-02T15:22:07,919 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithResetTtl,, stopping at row=testtb-testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-12-02T15:22:07,923 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } 2024-12-02T15:22:07,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733152927923 (current time:1733152927923). 2024-12-02T15:22:07,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-02T15:22:07,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testExportWithResetTtl VERSION not specified, setting to 2 2024-12-02T15:22:07,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-02T15:22:07,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6098630f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T15:22:07,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] client.ClusterIdFetcher(90): Going to request be0458f36726,34441,-1 for getting cluster id 2024-12-02T15:22:07,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-02T15:22:07,926 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '8e50677f-f1c7-447a-8806-ffbc6b162076' 2024-12-02T15:22:07,926 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-02T15:22:07,926 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "8e50677f-f1c7-447a-8806-ffbc6b162076" 2024-12-02T15:22:07,926 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4f96fd0b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, 
maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T15:22:07,926 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [be0458f36726,34441,-1] 2024-12-02T15:22:07,927 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-02T15:22:07,927 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T15:22:07,928 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45682, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-02T15:22:07,928 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4f94ad8f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T15:22:07,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-02T15:22:07,930 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=be0458f36726,45405,1733152773787, seqNum=-1] 2024-12-02T15:22:07,930 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T15:22:07,931 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40044, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T15:22:07,932 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441. 
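A few records back, HBaseTestingUtil(3046)/(3100)/(3120) waits up to 60000 ms until every region of the new table is assigned. A small sketch of that wait, assuming the HBaseTestingUtil class in this 3.0 snapshot keeps the same helpers as the older HBaseTestingUtility; the mini-cluster and table setup are included only to make the sketch self-contained.

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.util.Bytes;

    public class WaitForAssignmentSketch {
      public static void main(String[] args) throws Exception {
        HBaseTestingUtil util = new HBaseTestingUtil();
        util.startMiniCluster();                       // mini cluster, as in this test run
        TableName table = TableName.valueOf("testtb-testExportWithResetTtl");
        util.createTable(table, Bytes.toBytes("cf"));  // simple one-family table for the sketch
        // Blocks until every region of the table is reported as assigned
        // (both in hbase:meta and in the assignment manager), or the
        // 60 s timeout used in the log expires.
        util.waitUntilAllRegionsAssigned(table, 60000);
        util.shutdownMiniCluster();
      }
    }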
2024-12-02T15:22:07,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-02T15:22:07,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T15:22:07,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T15:22:07,933 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-02T15:22:07,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@502cc9c7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T15:22:07,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] client.ClusterIdFetcher(90): Going to request be0458f36726,34441,-1 for getting cluster id 2024-12-02T15:22:07,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-02T15:22:07,935 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '8e50677f-f1c7-447a-8806-ffbc6b162076' 2024-12-02T15:22:07,935 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-02T15:22:07,935 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "8e50677f-f1c7-447a-8806-ffbc6b162076" 2024-12-02T15:22:07,935 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6006dce3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T15:22:07,935 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [be0458f36726,34441,-1] 2024-12-02T15:22:07,936 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-02T15:22:07,936 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T15:22:07,937 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45702, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-02T15:22:07,938 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@57d9cee5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T15:22:07,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-02T15:22:07,940 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=be0458f36726,45405,1733152773787, seqNum=-1] 2024-12-02T15:22:07,940 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T15:22:07,942 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40052, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T15:22:07,944 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportWithResetTtl', locateType=CURRENT is [region=hbase:acl,,1733152776657.82c95700d9ff2efea2507bd84223c4b0., hostname=be0458f36726,43025,1733152773677, seqNum=2] 2024-12-02T15:22:07,945 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T15:22:07,946 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49852, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T15:22:07,948 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441. 
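Earlier, PermissionStorage(177) wrote the ACL "jenkins: RWXCA" for the new table and ZKPermissionWatcher pushed the update to every node; here the snapshot validation reads that ACL back. A hedged sketch of issuing such a table-level grant with AccessControlClient, assuming the AccessController coprocessor is enabled (as it evidently is in this cluster); the exact call the test itself uses is not visible in the log.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.security.access.AccessControlClient;
    import org.apache.hadoop.hbase.security.access.Permission;

    public class GrantTablePermissionsSketch {
      public static void main(String[] args) throws Throwable {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf)) {
          // READ, WRITE, EXEC, CREATE, ADMIN = the "RWXCA" string in the
          // PermissionStorage records; null family/qualifier means the grant
          // covers the whole table.
          AccessControlClient.grant(conn,
              TableName.valueOf("testtb-testExportWithResetTtl"),
              "jenkins", null, null,
              Permission.Action.READ, Permission.Action.WRITE, Permission.Action.EXEC,
              Permission.Action.CREATE, Permission.Action.ADMIN);
        }
      }
    }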
2024-12-02T15:22:07,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-02T15:22:07,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T15:22:07,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T15:22:07,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithResetTtl], kv [jenkins: RWXCA] 2024-12-02T15:22:07,949 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-02T15:22:07,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
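With the ACL copied into the snapshot description, the master registers the SnapshotProcedure for emptySnaptb0-testExportWithResetTtl ({ type=FLUSH ttl=0 }). A minimal client-side sketch of requesting the same kind of snapshot; the connection boilerplate is assumed.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class TakeSnapshotSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // For an enabled table this produces a FLUSH-type snapshot, matching
          // the "type=FLUSH ttl=0" request in the log; the call blocks until
          // the master-side SnapshotProcedure finishes.
          admin.snapshot("emptySnaptb0-testExportWithResetTtl",
              TableName.valueOf("testtb-testExportWithResetTtl"));
        }
      }
    }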
2024-12-02T15:22:07,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] procedure2.ProcedureExecutor(1139): Stored pid=68, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } 2024-12-02T15:22:07,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 }, snapshot procedure id = 68 2024-12-02T15:22:07,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=68 2024-12-02T15:22:07,953 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-02T15:22:07,954 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-02T15:22:07,957 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-02T15:22:07,964 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073741961_1137 (size=161) 2024-12-02T15:22:07,964 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073741961_1137 (size=161) 2024-12-02T15:22:07,964 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073741961_1137 (size=161) 2024-12-02T15:22:07,966 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-02T15:22:07,966 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=69, ppid=68, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 04be54dd4f37edf3659f725503c71f38}, {pid=70, ppid=68, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 552215209d1542254b303f0cef0c7638}] 2024-12-02T15:22:07,967 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=69, ppid=68, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 04be54dd4f37edf3659f725503c71f38 2024-12-02T15:22:07,967 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=70, ppid=68, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 552215209d1542254b303f0cef0c7638 2024-12-02T15:22:08,063 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=68 2024-12-02T15:22:08,118 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35155 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=70 2024-12-02T15:22:08,118 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43025 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=69 2024-12-02T15:22:08,119 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithResetTtl,,1733152927282.04be54dd4f37edf3659f725503c71f38. 2024-12-02T15:22:08,119 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithResetTtl,1,1733152927282.552215209d1542254b303f0cef0c7638. 2024-12-02T15:22:08,119 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] regionserver.HRegion(2603): Flush status journal for 04be54dd4f37edf3659f725503c71f38: 2024-12-02T15:22:08,119 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithResetTtl,,1733152927282.04be54dd4f37edf3659f725503c71f38. for emptySnaptb0-testExportWithResetTtl completed. 2024-12-02T15:22:08,119 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] regionserver.HRegion(2603): Flush status journal for 552215209d1542254b303f0cef0c7638: 2024-12-02T15:22:08,119 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithResetTtl,1,1733152927282.552215209d1542254b303f0cef0c7638. for emptySnaptb0-testExportWithResetTtl completed. 2024-12-02T15:22:08,119 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithResetTtl,,1733152927282.04be54dd4f37edf3659f725503c71f38.' region-info for snapshot=emptySnaptb0-testExportWithResetTtl 2024-12-02T15:22:08,119 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithResetTtl,1,1733152927282.552215209d1542254b303f0cef0c7638.' 
region-info for snapshot=emptySnaptb0-testExportWithResetTtl 2024-12-02T15:22:08,119 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-02T15:22:08,119 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-02T15:22:08,119 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-02T15:22:08,119 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-02T15:22:08,128 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073741962_1138 (size=68) 2024-12-02T15:22:08,128 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073741962_1138 (size=68) 2024-12-02T15:22:08,129 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073741962_1138 (size=68) 2024-12-02T15:22:08,129 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073741963_1139 (size=68) 2024-12-02T15:22:08,129 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithResetTtl,,1733152927282.04be54dd4f37edf3659f725503c71f38. 2024-12-02T15:22:08,129 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073741963_1139 (size=68) 2024-12-02T15:22:08,130 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=69 2024-12-02T15:22:08,130 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073741963_1139 (size=68) 2024-12-02T15:22:08,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.HMaster(4169): Remote procedure done, pid=69 2024-12-02T15:22:08,130 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportWithResetTtl on region 04be54dd4f37edf3659f725503c71f38 2024-12-02T15:22:08,130 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithResetTtl,1,1733152927282.552215209d1542254b303f0cef0c7638. 
2024-12-02T15:22:08,130 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=70 2024-12-02T15:22:08,131 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=69, ppid=68, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 04be54dd4f37edf3659f725503c71f38 2024-12-02T15:22:08,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.HMaster(4169): Remote procedure done, pid=70 2024-12-02T15:22:08,131 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportWithResetTtl on region 552215209d1542254b303f0cef0c7638 2024-12-02T15:22:08,131 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=70, ppid=68, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 552215209d1542254b303f0cef0c7638 2024-12-02T15:22:08,133 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=69, ppid=68, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 04be54dd4f37edf3659f725503c71f38 in 165 msec 2024-12-02T15:22:08,133 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=70, resume processing ppid=68 2024-12-02T15:22:08,134 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=70, ppid=68, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 552215209d1542254b303f0cef0c7638 in 166 msec 2024-12-02T15:22:08,134 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-02T15:22:08,134 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-02T15:22:08,135 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-02T15:22:08,135 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportWithResetTtl 2024-12-02T15:22:08,136 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/.hbase-snapshot/.tmp/emptySnaptb0-testExportWithResetTtl 2024-12-02T15:22:08,145 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073741964_1140 (size=543) 2024-12-02T15:22:08,145 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073741964_1140 (size=543) 2024-12-02T15:22:08,145 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073741964_1140 
(size=543) 2024-12-02T15:22:08,148 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-02T15:22:08,153 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-02T15:22:08,154 DEBUG [PEWorker-4 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/.hbase-snapshot/.tmp/emptySnaptb0-testExportWithResetTtl to hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/.hbase-snapshot/emptySnaptb0-testExportWithResetTtl 2024-12-02T15:22:08,155 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-02T15:22:08,155 DEBUG [PEWorker-4 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 }, snapshot procedure id = 68 2024-12-02T15:22:08,156 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=68, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } in 205 msec 2024-12-02T15:22:08,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=68 2024-12-02T15:22:08,273 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportWithResetTtl completed 2024-12-02T15:22:08,279 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportWithResetTtl', row='0e92cd71a6a4adfa819280ce7cc2e1d32', locateType=CURRENT is [region=testtb-testExportWithResetTtl,,1733152927282.04be54dd4f37edf3659f725503c71f38., hostname=be0458f36726,43025,1733152773677, seqNum=2] 2024-12-02T15:22:08,280 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportWithResetTtl', row='11760a0683ada87011e017af40001f14c', locateType=CURRENT is [region=testtb-testExportWithResetTtl,1,1733152927282.552215209d1542254b303f0cef0c7638., hostname=be0458f36726,35155,1733152773848, seqNum=2] 2024-12-02T15:22:08,282 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportWithResetTtl', row='2293399e1a1d2fc0705b011e7cd12d9f2', locateType=CURRENT is [region=testtb-testExportWithResetTtl,1,1733152927282.552215209d1542254b303f0cef0c7638., hostname=be0458f36726,35155,1733152773848, seqNum=2] 2024-12-02T15:22:08,283 DEBUG 
[RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportWithResetTtl', row='3c16f25a5eecb41f1bc42f4fbe633ea98', locateType=CURRENT is [region=testtb-testExportWithResetTtl,1,1733152927282.552215209d1542254b303f0cef0c7638., hostname=be0458f36726,35155,1733152773848, seqNum=2] 2024-12-02T15:22:08,286 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43025 {}] regionserver.HRegion(8528): writing data to region testtb-testExportWithResetTtl,,1733152927282.04be54dd4f37edf3659f725503c71f38. with WAL disabled. Data may be lost in the event of a crash. 2024-12-02T15:22:08,288 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58526, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T15:22:08,292 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35155 {}] regionserver.HRegion(8528): writing data to region testtb-testExportWithResetTtl,1,1733152927282.552215209d1542254b303f0cef0c7638. with WAL disabled. Data may be lost in the event of a crash. 2024-12-02T15:22:08,298 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithResetTtl,, stopping at row=testtb-testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-12-02T15:22:08,302 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportWithResetTtl 2024-12-02T15:22:08,302 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportWithResetTtl,,1733152927282.04be54dd4f37edf3659f725503c71f38. 2024-12-02T15:22:08,302 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-02T15:22:08,305 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithResetTtl,, stopping at row=testtb-testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-12-02T15:22:08,311 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithResetTtl,, stopping at row=testtb-testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-12-02T15:22:08,318 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithResetTtl,, stopping at row=testtb-testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-12-02T15:22:08,321 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } 2024-12-02T15:22:08,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733152928321 (current time:1733152928321). 
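The HRegion(8528) entries above warn that the test writes rows "with WAL disabled. Data may be lost in the event of a crash." A small sketch of how a client put opts out of the WAL; the row key, qualifier and value below are made up for illustration.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Durability;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class PutWithoutWalSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("testtb-testExportWithResetTtl"))) {
          Put put = new Put(Bytes.toBytes("row-0"));
          put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value"));
          // SKIP_WAL is what produces the "with WAL disabled" warning: faster writes,
          // but the edit is lost if the region server crashes before the memstore is flushed.
          put.setDurability(Durability.SKIP_WAL);
          table.put(put);
        }
      }
    }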
2024-12-02T15:22:08,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-02T15:22:08,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportWithResetTtl VERSION not specified, setting to 2 2024-12-02T15:22:08,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-02T15:22:08,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2420d3d4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T15:22:08,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] client.ClusterIdFetcher(90): Going to request be0458f36726,34441,-1 for getting cluster id 2024-12-02T15:22:08,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-02T15:22:08,323 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '8e50677f-f1c7-447a-8806-ffbc6b162076' 2024-12-02T15:22:08,323 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-02T15:22:08,323 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "8e50677f-f1c7-447a-8806-ffbc6b162076" 2024-12-02T15:22:08,324 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@143c9a89, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T15:22:08,324 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [be0458f36726,34441,-1] 2024-12-02T15:22:08,324 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-02T15:22:08,324 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T15:22:08,325 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45720, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-02T15:22:08,326 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4c99a469, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T15:22:08,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-02T15:22:08,328 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] 
client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=be0458f36726,45405,1733152773787, seqNum=-1] 2024-12-02T15:22:08,328 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T15:22:08,329 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40064, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T15:22:08,330 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441. 2024-12-02T15:22:08,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-02T15:22:08,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T15:22:08,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T15:22:08,331 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-12-02T15:22:08,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@74d740ca, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T15:22:08,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] client.ClusterIdFetcher(90): Going to request be0458f36726,34441,-1 for getting cluster id 2024-12-02T15:22:08,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-02T15:22:08,333 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '8e50677f-f1c7-447a-8806-ffbc6b162076' 2024-12-02T15:22:08,333 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-02T15:22:08,333 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "8e50677f-f1c7-447a-8806-ffbc6b162076" 2024-12-02T15:22:08,334 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@386bef3e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T15:22:08,334 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [be0458f36726,34441,-1] 2024-12-02T15:22:08,334 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-02T15:22:08,334 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T15:22:08,335 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45732, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-02T15:22:08,336 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3a95afc5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T15:22:08,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-02T15:22:08,338 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=be0458f36726,45405,1733152773787, seqNum=-1] 2024-12-02T15:22:08,338 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T15:22:08,339 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40076, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
2024-12-02T15:22:08,341 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportWithResetTtl', locateType=CURRENT is [region=hbase:acl,,1733152776657.82c95700d9ff2efea2507bd84223c4b0., hostname=be0458f36726,43025,1733152773677, seqNum=2] 2024-12-02T15:22:08,341 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T15:22:08,342 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49856, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T15:22:08,344 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441. 2024-12-02T15:22:08,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at 
org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-02T15:22:08,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T15:22:08,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T15:22:08,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithResetTtl], kv [jenkins: RWXCA] 2024-12-02T15:22:08,345 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-02T15:22:08,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-12-02T15:22:08,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] procedure2.ProcedureExecutor(1139): Stored pid=71, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } 2024-12-02T15:22:08,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 }, snapshot procedure id = 71 2024-12-02T15:22:08,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=71 2024-12-02T15:22:08,348 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-02T15:22:08,349 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-02T15:22:08,351 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-02T15:22:08,360 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073741965_1141 (size=156) 2024-12-02T15:22:08,360 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073741965_1141 (size=156) 2024-12-02T15:22:08,360 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073741965_1141 (size=156) 2024-12-02T15:22:08,362 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute 
state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-02T15:22:08,362 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=72, ppid=71, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 04be54dd4f37edf3659f725503c71f38}, {pid=73, ppid=71, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 552215209d1542254b303f0cef0c7638}] 2024-12-02T15:22:08,364 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=72, ppid=71, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 04be54dd4f37edf3659f725503c71f38 2024-12-02T15:22:08,364 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=73, ppid=71, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 552215209d1542254b303f0cef0c7638 2024-12-02T15:22:08,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=71 2024-12-02T15:22:08,516 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43025 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=72 2024-12-02T15:22:08,516 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35155 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=73 2024-12-02T15:22:08,516 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithResetTtl,1,1733152927282.552215209d1542254b303f0cef0c7638. 2024-12-02T15:22:08,516 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithResetTtl,,1733152927282.04be54dd4f37edf3659f725503c71f38. 
2024-12-02T15:22:08,516 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.HRegion(2902): Flushing 04be54dd4f37edf3659f725503c71f38 1/1 column families, dataSize=266 B heapSize=832 B 2024-12-02T15:22:08,516 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.HRegion(2902): Flushing 552215209d1542254b303f0cef0c7638 1/1 column families, dataSize=3.00 KB heapSize=6.72 KB 2024-12-02T15:22:08,535 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportWithResetTtl/552215209d1542254b303f0cef0c7638/.tmp/cf/9d39722304b844b0b3f16b147bef222b is 71, key is 13ca56278642643d89208c94a4e5e544/cf:q/1733152928292/Put/seqid=0 2024-12-02T15:22:08,537 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportWithResetTtl/04be54dd4f37edf3659f725503c71f38/.tmp/cf/8aaa5e3805c042eba3c598788d4cbbc6 is 71, key is 02b9718f365c5bdd3acc25d5239fb057/cf:q/1733152928286/Put/seqid=0 2024-12-02T15:22:08,543 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073741967_1143 (size=5356) 2024-12-02T15:22:08,543 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073741966_1142 (size=8258) 2024-12-02T15:22:08,543 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073741967_1143 (size=5356) 2024-12-02T15:22:08,544 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073741966_1142 (size=8258) 2024-12-02T15:22:08,545 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073741967_1143 (size=5356) 2024-12-02T15:22:08,545 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=266 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportWithResetTtl/04be54dd4f37edf3659f725503c71f38/.tmp/cf/8aaa5e3805c042eba3c598788d4cbbc6 2024-12-02T15:22:08,545 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.00 KB at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportWithResetTtl/552215209d1542254b303f0cef0c7638/.tmp/cf/9d39722304b844b0b3f16b147bef222b 2024-12-02T15:22:08,545 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073741966_1142 (size=8258) 2024-12-02T15:22:08,551 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportWithResetTtl/552215209d1542254b303f0cef0c7638/.tmp/cf/9d39722304b844b0b3f16b147bef222b as hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportWithResetTtl/552215209d1542254b303f0cef0c7638/cf/9d39722304b844b0b3f16b147bef222b 2024-12-02T15:22:08,556 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportWithResetTtl/04be54dd4f37edf3659f725503c71f38/.tmp/cf/8aaa5e3805c042eba3c598788d4cbbc6 as hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportWithResetTtl/04be54dd4f37edf3659f725503c71f38/cf/8aaa5e3805c042eba3c598788d4cbbc6 2024-12-02T15:22:08,562 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportWithResetTtl/04be54dd4f37edf3659f725503c71f38/cf/8aaa5e3805c042eba3c598788d4cbbc6, entries=4, sequenceid=6, filesize=5.2 K 2024-12-02T15:22:08,562 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportWithResetTtl/552215209d1542254b303f0cef0c7638/cf/9d39722304b844b0b3f16b147bef222b, entries=46, sequenceid=6, filesize=8.1 K 2024-12-02T15:22:08,563 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.HRegion(3140): Finished flush of dataSize ~266 B/266, heapSize ~816 B/816, currentSize=0 B/0 for 04be54dd4f37edf3659f725503c71f38 in 46ms, sequenceid=6, compaction requested=false 2024-12-02T15:22:08,563 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportWithResetTtl' 2024-12-02T15:22:08,563 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.HRegion(3140): Finished flush of dataSize ~3.00 KB/3070, heapSize ~6.70 KB/6864, currentSize=0 B/0 for 552215209d1542254b303f0cef0c7638 in 47ms, sequenceid=6, compaction requested=false 2024-12-02T15:22:08,563 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.HRegion(2603): Flush status journal for 04be54dd4f37edf3659f725503c71f38: 2024-12-02T15:22:08,563 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.HRegion(2603): Flush status journal for 552215209d1542254b303f0cef0c7638: 2024-12-02T15:22:08,563 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithResetTtl,,1733152927282.04be54dd4f37edf3659f725503c71f38. for snaptb0-testExportWithResetTtl completed. 
2024-12-02T15:22:08,563 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithResetTtl,1,1733152927282.552215209d1542254b303f0cef0c7638. for snaptb0-testExportWithResetTtl completed. 2024-12-02T15:22:08,563 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithResetTtl,,1733152927282.04be54dd4f37edf3659f725503c71f38.' region-info for snapshot=snaptb0-testExportWithResetTtl 2024-12-02T15:22:08,563 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithResetTtl,1,1733152927282.552215209d1542254b303f0cef0c7638.' region-info for snapshot=snaptb0-testExportWithResetTtl 2024-12-02T15:22:08,563 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-02T15:22:08,563 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-02T15:22:08,563 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportWithResetTtl/04be54dd4f37edf3659f725503c71f38/cf/8aaa5e3805c042eba3c598788d4cbbc6] hfiles 2024-12-02T15:22:08,563 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportWithResetTtl/552215209d1542254b303f0cef0c7638/cf/9d39722304b844b0b3f16b147bef222b] hfiles 2024-12-02T15:22:08,563 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportWithResetTtl/04be54dd4f37edf3659f725503c71f38/cf/8aaa5e3805c042eba3c598788d4cbbc6 for snapshot=snaptb0-testExportWithResetTtl 2024-12-02T15:22:08,563 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportWithResetTtl/552215209d1542254b303f0cef0c7638/cf/9d39722304b844b0b3f16b147bef222b for snapshot=snaptb0-testExportWithResetTtl 2024-12-02T15:22:08,576 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073741968_1144 (size=107) 2024-12-02T15:22:08,576 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073741969_1145 (size=107) 2024-12-02T15:22:08,576 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073741969_1145 (size=107) 2024-12-02T15:22:08,576 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073741968_1144 (size=107) 2024-12-02T15:22:08,577 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073741969_1145 (size=107) 2024-12-02T15:22:08,577 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithResetTtl,1,1733152927282.552215209d1542254b303f0cef0c7638. 2024-12-02T15:22:08,577 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=73 2024-12-02T15:22:08,577 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073741968_1144 (size=107) 2024-12-02T15:22:08,577 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithResetTtl,,1733152927282.04be54dd4f37edf3659f725503c71f38. 2024-12-02T15:22:08,577 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=72 2024-12-02T15:22:08,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.HMaster(4169): Remote procedure done, pid=72 2024-12-02T15:22:08,578 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportWithResetTtl on region 04be54dd4f37edf3659f725503c71f38 2024-12-02T15:22:08,578 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=72, ppid=71, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 04be54dd4f37edf3659f725503c71f38 2024-12-02T15:22:08,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] master.HMaster(4169): Remote procedure done, pid=73 2024-12-02T15:22:08,578 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportWithResetTtl on region 552215209d1542254b303f0cef0c7638 2024-12-02T15:22:08,578 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=73, ppid=71, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 552215209d1542254b303f0cef0c7638 2024-12-02T15:22:08,581 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=72, ppid=71, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 04be54dd4f37edf3659f725503c71f38 in 217 msec 2024-12-02T15:22:08,583 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=73, resume processing ppid=71 2024-12-02T15:22:08,583 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-02T15:22:08,583 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=73, ppid=71, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 552215209d1542254b303f0cef0c7638 in 217 msec 2024-12-02T15:22:08,584 INFO [PEWorker-1 {}] 
procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-02T15:22:08,585 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-02T15:22:08,585 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportWithResetTtl 2024-12-02T15:22:08,586 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/.hbase-snapshot/.tmp/snaptb0-testExportWithResetTtl 2024-12-02T15:22:08,603 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073741970_1146 (size=621) 2024-12-02T15:22:08,603 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073741970_1146 (size=621) 2024-12-02T15:22:08,603 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073741970_1146 (size=621) 2024-12-02T15:22:08,606 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-02T15:22:08,611 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-02T15:22:08,612 DEBUG [PEWorker-1 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/.hbase-snapshot/.tmp/snaptb0-testExportWithResetTtl to hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/.hbase-snapshot/snaptb0-testExportWithResetTtl 2024-12-02T15:22:08,613 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-02T15:22:08,613 DEBUG [PEWorker-1 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 }, snapshot procedure id = 71 2024-12-02T15:22:08,615 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=71, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } in 
268 msec 2024-12-02T15:22:08,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=71 2024-12-02T15:22:08,664 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportWithResetTtl completed 2024-12-02T15:22:08,667 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testExportWithResetTtl', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-02T15:22:08,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] procedure2.ProcedureExecutor(1139): Stored pid=74, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testExportWithResetTtl 2024-12-02T15:22:08,671 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=74, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testExportWithResetTtl execute state=CREATE_TABLE_PRE_OPERATION 2024-12-02T15:22:08,671 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T15:22:08,671 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testExportWithResetTtl" procId is: 74 2024-12-02T15:22:08,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=74 2024-12-02T15:22:08,672 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=74, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testExportWithResetTtl execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-02T15:22:08,681 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073741971_1147 (size=397) 2024-12-02T15:22:08,681 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073741971_1147 (size=397) 2024-12-02T15:22:08,681 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073741971_1147 (size=397) 2024-12-02T15:22:08,683 INFO [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => ccbd18e92db6010118a2b8a17b87472d, NAME => 'testExportWithResetTtl,,1733152928666.ccbd18e92db6010118a2b8a17b87472d.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testExportWithResetTtl', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, 
regionDir=hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657 2024-12-02T15:22:08,683 INFO [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => 3deadc203bd5ed12ca129719026b1ee7, NAME => 'testExportWithResetTtl,1,1733152928666.3deadc203bd5ed12ca129719026b1ee7.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testExportWithResetTtl', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657 2024-12-02T15:22:08,693 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073741973_1149 (size=58) 2024-12-02T15:22:08,693 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073741972_1148 (size=58) 2024-12-02T15:22:08,694 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073741973_1149 (size=58) 2024-12-02T15:22:08,694 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073741972_1148 (size=58) 2024-12-02T15:22:08,694 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073741973_1149 (size=58) 2024-12-02T15:22:08,694 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(898): Instantiated testExportWithResetTtl,,1733152928666.ccbd18e92db6010118a2b8a17b87472d.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T15:22:08,694 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(898): Instantiated testExportWithResetTtl,1,1733152928666.3deadc203bd5ed12ca129719026b1ee7.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T15:22:08,694 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073741972_1148 (size=58) 2024-12-02T15:22:08,694 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1722): Closing ccbd18e92db6010118a2b8a17b87472d, disabling compactions & flushes 2024-12-02T15:22:08,694 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1722): Closing 3deadc203bd5ed12ca129719026b1ee7, disabling compactions & flushes 2024-12-02T15:22:08,694 INFO [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1755): Closing region testExportWithResetTtl,,1733152928666.ccbd18e92db6010118a2b8a17b87472d. 2024-12-02T15:22:08,694 INFO [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1755): Closing region testExportWithResetTtl,1,1733152928666.3deadc203bd5ed12ca129719026b1ee7. 
2024-12-02T15:22:08,694 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testExportWithResetTtl,,1733152928666.ccbd18e92db6010118a2b8a17b87472d. 2024-12-02T15:22:08,694 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testExportWithResetTtl,1,1733152928666.3deadc203bd5ed12ca129719026b1ee7. 2024-12-02T15:22:08,694 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testExportWithResetTtl,,1733152928666.ccbd18e92db6010118a2b8a17b87472d. after waiting 0 ms 2024-12-02T15:22:08,694 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testExportWithResetTtl,1,1733152928666.3deadc203bd5ed12ca129719026b1ee7. after waiting 0 ms 2024-12-02T15:22:08,694 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testExportWithResetTtl,,1733152928666.ccbd18e92db6010118a2b8a17b87472d. 2024-12-02T15:22:08,694 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testExportWithResetTtl,1,1733152928666.3deadc203bd5ed12ca129719026b1ee7. 2024-12-02T15:22:08,694 INFO [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1973): Closed testExportWithResetTtl,,1733152928666.ccbd18e92db6010118a2b8a17b87472d. 2024-12-02T15:22:08,694 INFO [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1973): Closed testExportWithResetTtl,1,1733152928666.3deadc203bd5ed12ca129719026b1ee7. 2024-12-02T15:22:08,694 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1676): Region close journal for 3deadc203bd5ed12ca129719026b1ee7: Waiting for close lock at 1733152928694Disabling compacts and flushes for region at 1733152928694Disabling writes for close at 1733152928694Writing region close event to WAL at 1733152928694Closed at 1733152928694 2024-12-02T15:22:08,694 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1676): Region close journal for ccbd18e92db6010118a2b8a17b87472d: Waiting for close lock at 1733152928694Disabling compacts and flushes for region at 1733152928694Disabling writes for close at 1733152928694Writing region close event to WAL at 1733152928694Closed at 1733152928694 2024-12-02T15:22:08,695 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=74, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testExportWithResetTtl execute state=CREATE_TABLE_ADD_TO_META 2024-12-02T15:22:08,695 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testExportWithResetTtl,1,1733152928666.3deadc203bd5ed12ca129719026b1ee7.","families":{"info":[{"qualifier":"regioninfo","vlen":57,"tag":[],"timestamp":"1733152928695"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733152928695"}]},"ts":"1733152928695"} 2024-12-02T15:22:08,696 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testExportWithResetTtl,,1733152928666.ccbd18e92db6010118a2b8a17b87472d.","families":{"info":[{"qualifier":"regioninfo","vlen":57,"tag":[],"timestamp":"1733152928695"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733152928695"}]},"ts":"1733152928695"} 2024-12-02T15:22:08,698 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 
2024-12-02T15:22:08,698 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=74, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testExportWithResetTtl execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-02T15:22:08,699 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733152928698"}]},"ts":"1733152928698"} 2024-12-02T15:22:08,700 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportWithResetTtl, state=ENABLING in hbase:meta 2024-12-02T15:22:08,700 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(204): Hosts are {be0458f36726=0} racks are {/default-rack=0} 2024-12-02T15:22:08,702 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-02T15:22:08,702 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-02T15:22:08,702 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-02T15:22:08,702 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-02T15:22:08,702 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-02T15:22:08,702 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-02T15:22:08,702 INFO [PEWorker-5 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-02T15:22:08,702 INFO [PEWorker-5 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-02T15:22:08,702 INFO [PEWorker-5 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-02T15:22:08,702 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-02T15:22:08,702 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=75, ppid=74, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=ccbd18e92db6010118a2b8a17b87472d, ASSIGN}, {pid=76, ppid=74, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=3deadc203bd5ed12ca129719026b1ee7, ASSIGN}] 2024-12-02T15:22:08,703 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=76, ppid=74, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=3deadc203bd5ed12ca129719026b1ee7, ASSIGN 2024-12-02T15:22:08,703 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=75, ppid=74, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=ccbd18e92db6010118a2b8a17b87472d, ASSIGN 2024-12-02T15:22:08,704 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=75, ppid=74, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testExportWithResetTtl, region=ccbd18e92db6010118a2b8a17b87472d, ASSIGN; state=OFFLINE, location=be0458f36726,43025,1733152773677; forceNewPlan=false, retain=false 2024-12-02T15:22:08,704 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=76, ppid=74, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; 
TransitRegionStateProcedure table=testExportWithResetTtl, region=3deadc203bd5ed12ca129719026b1ee7, ASSIGN; state=OFFLINE, location=be0458f36726,45405,1733152773787; forceNewPlan=false, retain=false 2024-12-02T15:22:08,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=74 2024-12-02T15:22:08,854 INFO [be0458f36726:34441 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 2024-12-02T15:22:08,855 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=75 updating hbase:meta row=ccbd18e92db6010118a2b8a17b87472d, regionState=OPENING, regionLocation=be0458f36726,43025,1733152773677 2024-12-02T15:22:08,855 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=76 updating hbase:meta row=3deadc203bd5ed12ca129719026b1ee7, regionState=OPENING, regionLocation=be0458f36726,45405,1733152773787 2024-12-02T15:22:08,857 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=76, ppid=74, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testExportWithResetTtl, region=3deadc203bd5ed12ca129719026b1ee7, ASSIGN because future has completed 2024-12-02T15:22:08,858 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=77, ppid=76, state=RUNNABLE, hasLock=false; OpenRegionProcedure 3deadc203bd5ed12ca129719026b1ee7, server=be0458f36726,45405,1733152773787}] 2024-12-02T15:22:08,858 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=75, ppid=74, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testExportWithResetTtl, region=ccbd18e92db6010118a2b8a17b87472d, ASSIGN because future has completed 2024-12-02T15:22:08,859 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=78, ppid=75, state=RUNNABLE, hasLock=false; OpenRegionProcedure ccbd18e92db6010118a2b8a17b87472d, server=be0458f36726,43025,1733152773677}] 2024-12-02T15:22:08,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=74 2024-12-02T15:22:09,019 INFO [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] handler.AssignRegionHandler(132): Open testExportWithResetTtl,1,1733152928666.3deadc203bd5ed12ca129719026b1ee7. 2024-12-02T15:22:09,019 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(7752): Opening region: {ENCODED => 3deadc203bd5ed12ca129719026b1ee7, NAME => 'testExportWithResetTtl,1,1733152928666.3deadc203bd5ed12ca129719026b1ee7.', STARTKEY => '1', ENDKEY => ''} 2024-12-02T15:22:09,019 INFO [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] handler.AssignRegionHandler(132): Open testExportWithResetTtl,,1733152928666.ccbd18e92db6010118a2b8a17b87472d. 2024-12-02T15:22:09,019 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(8280): Registered coprocessor service: region=testExportWithResetTtl,1,1733152928666.3deadc203bd5ed12ca129719026b1ee7. 
service=AccessControlService 2024-12-02T15:22:09,019 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(7752): Opening region: {ENCODED => ccbd18e92db6010118a2b8a17b87472d, NAME => 'testExportWithResetTtl,,1733152928666.ccbd18e92db6010118a2b8a17b87472d.', STARTKEY => '', ENDKEY => '1'} 2024-12-02T15:22:09,019 INFO [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-02T15:22:09,020 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(8280): Registered coprocessor service: region=testExportWithResetTtl,,1733152928666.ccbd18e92db6010118a2b8a17b87472d. service=AccessControlService 2024-12-02T15:22:09,020 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testExportWithResetTtl 3deadc203bd5ed12ca129719026b1ee7 2024-12-02T15:22:09,020 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(898): Instantiated testExportWithResetTtl,1,1733152928666.3deadc203bd5ed12ca129719026b1ee7.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T15:22:09,020 INFO [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-02T15:22:09,020 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(7794): checking encryption for 3deadc203bd5ed12ca129719026b1ee7 2024-12-02T15:22:09,020 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(7797): checking classloading for 3deadc203bd5ed12ca129719026b1ee7 2024-12-02T15:22:09,020 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testExportWithResetTtl ccbd18e92db6010118a2b8a17b87472d 2024-12-02T15:22:09,020 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(898): Instantiated testExportWithResetTtl,,1733152928666.ccbd18e92db6010118a2b8a17b87472d.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T15:22:09,020 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(7794): checking encryption for ccbd18e92db6010118a2b8a17b87472d 2024-12-02T15:22:09,020 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(7797): checking classloading for ccbd18e92db6010118a2b8a17b87472d 2024-12-02T15:22:09,021 INFO [StoreOpener-3deadc203bd5ed12ca129719026b1ee7-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 3deadc203bd5ed12ca129719026b1ee7 2024-12-02T15:22:09,021 
INFO [StoreOpener-ccbd18e92db6010118a2b8a17b87472d-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region ccbd18e92db6010118a2b8a17b87472d 2024-12-02T15:22:09,023 INFO [StoreOpener-ccbd18e92db6010118a2b8a17b87472d-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region ccbd18e92db6010118a2b8a17b87472d columnFamilyName cf 2024-12-02T15:22:09,023 INFO [StoreOpener-3deadc203bd5ed12ca129719026b1ee7-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 3deadc203bd5ed12ca129719026b1ee7 columnFamilyName cf 2024-12-02T15:22:09,023 DEBUG [StoreOpener-ccbd18e92db6010118a2b8a17b87472d-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T15:22:09,023 DEBUG [StoreOpener-3deadc203bd5ed12ca129719026b1ee7-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T15:22:09,024 INFO [StoreOpener-3deadc203bd5ed12ca129719026b1ee7-1 {}] regionserver.HStore(327): Store=3deadc203bd5ed12ca129719026b1ee7/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T15:22:09,024 INFO [StoreOpener-ccbd18e92db6010118a2b8a17b87472d-1 {}] regionserver.HStore(327): Store=ccbd18e92db6010118a2b8a17b87472d/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T15:22:09,024 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(1038): replaying wal for 3deadc203bd5ed12ca129719026b1ee7 2024-12-02T15:22:09,024 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(1038): replaying wal for ccbd18e92db6010118a2b8a17b87472d 2024-12-02T15:22:09,025 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(5546): Found 0 recovered edits 
file(s) under hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testExportWithResetTtl/3deadc203bd5ed12ca129719026b1ee7 2024-12-02T15:22:09,025 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testExportWithResetTtl/ccbd18e92db6010118a2b8a17b87472d 2024-12-02T15:22:09,025 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testExportWithResetTtl/ccbd18e92db6010118a2b8a17b87472d 2024-12-02T15:22:09,025 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testExportWithResetTtl/3deadc203bd5ed12ca129719026b1ee7 2024-12-02T15:22:09,026 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(1048): stopping wal replay for ccbd18e92db6010118a2b8a17b87472d 2024-12-02T15:22:09,026 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(1048): stopping wal replay for 3deadc203bd5ed12ca129719026b1ee7 2024-12-02T15:22:09,026 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(1060): Cleaning up temporary data for ccbd18e92db6010118a2b8a17b87472d 2024-12-02T15:22:09,026 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(1060): Cleaning up temporary data for 3deadc203bd5ed12ca129719026b1ee7 2024-12-02T15:22:09,028 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(1093): writing seq id for ccbd18e92db6010118a2b8a17b87472d 2024-12-02T15:22:09,028 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(1093): writing seq id for 3deadc203bd5ed12ca129719026b1ee7 2024-12-02T15:22:09,030 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testExportWithResetTtl/3deadc203bd5ed12ca129719026b1ee7/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-02T15:22:09,030 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testExportWithResetTtl/ccbd18e92db6010118a2b8a17b87472d/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-02T15:22:09,031 INFO [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(1114): Opened 3deadc203bd5ed12ca129719026b1ee7; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=63677518, jitterRate=-0.05113103985786438}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-02T15:22:09,031 INFO 
[RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(1114): Opened ccbd18e92db6010118a2b8a17b87472d; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=60854355, jitterRate=-0.09319944679737091}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-02T15:22:09,031 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 3deadc203bd5ed12ca129719026b1ee7 2024-12-02T15:22:09,031 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(1122): Running coprocessor post-open hooks for ccbd18e92db6010118a2b8a17b87472d 2024-12-02T15:22:09,031 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(1006): Region open journal for 3deadc203bd5ed12ca129719026b1ee7: Running coprocessor pre-open hook at 1733152929020Writing region info on filesystem at 1733152929020Initializing all the Stores at 1733152929021 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733152929021Cleaning up temporary data from old regions at 1733152929026 (+5 ms)Running coprocessor post-open hooks at 1733152929031 (+5 ms)Region opened successfully at 1733152929031 2024-12-02T15:22:09,031 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(1006): Region open journal for ccbd18e92db6010118a2b8a17b87472d: Running coprocessor pre-open hook at 1733152929020Writing region info on filesystem at 1733152929020Initializing all the Stores at 1733152929021 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733152929021Cleaning up temporary data from old regions at 1733152929026 (+5 ms)Running coprocessor post-open hooks at 1733152929031 (+5 ms)Region opened successfully at 1733152929031 2024-12-02T15:22:09,032 INFO [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegionServer(2236): Post open deploy tasks for testExportWithResetTtl,1,1733152928666.3deadc203bd5ed12ca129719026b1ee7., pid=77, masterSystemTime=1733152929010 2024-12-02T15:22:09,032 INFO [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegionServer(2236): Post open deploy tasks for testExportWithResetTtl,,1733152928666.ccbd18e92db6010118a2b8a17b87472d., pid=78, masterSystemTime=1733152929012 2024-12-02T15:22:09,034 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegionServer(2266): Finished post open deploy task for testExportWithResetTtl,,1733152928666.ccbd18e92db6010118a2b8a17b87472d. 
2024-12-02T15:22:09,034 INFO [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] handler.AssignRegionHandler(153): Opened testExportWithResetTtl,,1733152928666.ccbd18e92db6010118a2b8a17b87472d. 2024-12-02T15:22:09,034 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=75 updating hbase:meta row=ccbd18e92db6010118a2b8a17b87472d, regionState=OPEN, openSeqNum=2, regionLocation=be0458f36726,43025,1733152773677 2024-12-02T15:22:09,034 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegionServer(2266): Finished post open deploy task for testExportWithResetTtl,1,1733152928666.3deadc203bd5ed12ca129719026b1ee7. 2024-12-02T15:22:09,034 INFO [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] handler.AssignRegionHandler(153): Opened testExportWithResetTtl,1,1733152928666.3deadc203bd5ed12ca129719026b1ee7. 2024-12-02T15:22:09,035 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=76 updating hbase:meta row=3deadc203bd5ed12ca129719026b1ee7, regionState=OPEN, openSeqNum=2, regionLocation=be0458f36726,45405,1733152773787 2024-12-02T15:22:09,036 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=78, ppid=75, state=RUNNABLE, hasLock=false; OpenRegionProcedure ccbd18e92db6010118a2b8a17b87472d, server=be0458f36726,43025,1733152773677 because future has completed 2024-12-02T15:22:09,037 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=77, ppid=76, state=RUNNABLE, hasLock=false; OpenRegionProcedure 3deadc203bd5ed12ca129719026b1ee7, server=be0458f36726,45405,1733152773787 because future has completed 2024-12-02T15:22:09,038 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=78, resume processing ppid=75 2024-12-02T15:22:09,038 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=78, ppid=75, state=SUCCESS, hasLock=false; OpenRegionProcedure ccbd18e92db6010118a2b8a17b87472d, server=be0458f36726,43025,1733152773677 in 178 msec 2024-12-02T15:22:09,039 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=77, resume processing ppid=76 2024-12-02T15:22:09,039 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=77, ppid=76, state=SUCCESS, hasLock=false; OpenRegionProcedure 3deadc203bd5ed12ca129719026b1ee7, server=be0458f36726,45405,1733152773787 in 180 msec 2024-12-02T15:22:09,039 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=75, ppid=74, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=ccbd18e92db6010118a2b8a17b87472d, ASSIGN in 336 msec 2024-12-02T15:22:09,040 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=76, resume processing ppid=74 2024-12-02T15:22:09,040 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=76, ppid=74, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=3deadc203bd5ed12ca129719026b1ee7, ASSIGN in 337 msec 2024-12-02T15:22:09,041 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=74, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testExportWithResetTtl execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-02T15:22:09,041 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put 
{"totalColumns":1,"row":"testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733152929041"}]},"ts":"1733152929041"} 2024-12-02T15:22:09,043 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportWithResetTtl, state=ENABLED in hbase:meta 2024-12-02T15:22:09,044 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=74, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testExportWithResetTtl execute state=CREATE_TABLE_POST_OPERATION 2024-12-02T15:22:09,044 DEBUG [PEWorker-5 {}] access.PermissionStorage(177): Writing permission with rowKey testExportWithResetTtl jenkins: RWXCA 2024-12-02T15:22:09,047 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43025 {}] access.PermissionStorage(613): Read acl: entry[testExportWithResetTtl], kv [jenkins: RWXCA] 2024-12-02T15:22:09,086 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43025-0x10197ce54a90001, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T15:22:09,086 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35155-0x10197ce54a90003, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T15:22:09,086 DEBUG [pool-69-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45405-0x10197ce54a90002, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T15:22:09,086 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34441-0x10197ce54a90000, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T15:22:09,127 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-02T15:22:09,127 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF\x0A<\x0A\x07jenkins\x121\x08\x03"-\x0A!\x0A\x07default\x12\x16testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-02T15:22:09,127 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-02T15:22:09,128 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF\x0A<\x0A\x07jenkins\x121\x08\x03"-\x0A!\x0A\x07default\x12\x16testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-02T15:22:09,128 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-02T15:22:09,128 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data 
PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-02T15:22:09,128 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF\x0A<\x0A\x07jenkins\x121\x08\x03"-\x0A!\x0A\x07default\x12\x16testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-02T15:22:09,128 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF\x0A<\x0A\x07jenkins\x121\x08\x03"-\x0A!\x0A\x07default\x12\x16testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-02T15:22:09,129 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=74, state=SUCCESS, hasLock=false; CreateTableProcedure table=testExportWithResetTtl in 460 msec 2024-12-02T15:22:09,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=74 2024-12-02T15:22:09,304 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testExportWithResetTtl completed 2024-12-02T15:22:09,304 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table testExportWithResetTtl get assigned. Timeout = 60000ms 2024-12-02T15:22:09,304 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-02T15:22:09,308 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table testExportWithResetTtl assigned to meta. Checking AM states. 2024-12-02T15:22:09,309 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-02T15:22:09,309 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table testExportWithResetTtl assigned. 
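The "Waiting until all regions of table testExportWithResetTtl get assigned" / "All regions ... assigned" lines above correspond to HBaseTestingUtil.waitUntilAllRegionsAssigned. A minimal sketch of that call against a mini cluster, assuming the 3.x HBaseTestingUtil API; the table and family names mirror the test, but the surrounding setup is illustrative only:

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MiniClusterAssignExample {
      public static void main(String[] args) throws Exception {
        HBaseTestingUtil util = new HBaseTestingUtil();
        util.startMiniCluster();
        TableName tn = TableName.valueOf("testExportWithResetTtl");
        // Create a table with a single 'cf' family, as in the test.
        util.createTable(tn, Bytes.toBytes("cf"));
        // Same wait the log records: blocks (up to 60s here) until every region
        // of the table is reported as assigned in hbase:meta and by the AM.
        util.waitUntilAllRegionsAssigned(tn, 60000);
        util.shutdownMiniCluster();
      }
    }
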
2024-12-02T15:22:09,309 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportWithResetTtl,, stopping at row=testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-12-02T15:22:09,316 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testExportWithResetTtl', row='0d547810c5f6ec1952075221fa41f4143', locateType=CURRENT is [region=testExportWithResetTtl,,1733152928666.ccbd18e92db6010118a2b8a17b87472d., hostname=be0458f36726,43025,1733152773677, seqNum=2] 2024-12-02T15:22:09,317 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testExportWithResetTtl', row='19acfaa5fc2378de7402ee5d619829319', locateType=CURRENT is [region=testExportWithResetTtl,1,1733152928666.3deadc203bd5ed12ca129719026b1ee7., hostname=be0458f36726,45405,1733152773787, seqNum=2] 2024-12-02T15:22:09,318 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testExportWithResetTtl', row='21fc1c83256879503e6d3aa9a3d93c7e6', locateType=CURRENT is [region=testExportWithResetTtl,1,1733152928666.3deadc203bd5ed12ca129719026b1ee7., hostname=be0458f36726,45405,1733152773787, seqNum=2] 2024-12-02T15:22:09,319 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testExportWithResetTtl', row='3fb4a8a03e5cd504eb496dd7ec36cc433', locateType=CURRENT is [region=testExportWithResetTtl,1,1733152928666.3deadc203bd5ed12ca129719026b1ee7., hostname=be0458f36726,45405,1733152773787, seqNum=2] 2024-12-02T15:22:09,320 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testExportWithResetTtl', row='489be475708ceeae06ec932dcf4c22a38', locateType=CURRENT is [region=testExportWithResetTtl,1,1733152928666.3deadc203bd5ed12ca129719026b1ee7., hostname=be0458f36726,45405,1733152773787, seqNum=2] 2024-12-02T15:22:09,325 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43025 {}] regionserver.HRegion(8528): writing data to region testExportWithResetTtl,,1733152928666.ccbd18e92db6010118a2b8a17b87472d. with WAL disabled. Data may be lost in the event of a crash. 2024-12-02T15:22:09,327 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45405 {}] regionserver.HRegion(8528): writing data to region testExportWithResetTtl,1,1733152928666.3deadc203bd5ed12ca129719026b1ee7. with WAL disabled. Data may be lost in the event of a crash. 2024-12-02T15:22:09,328 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportWithResetTtl,, stopping at row=testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-12-02T15:22:09,331 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testExportWithResetTtl 2024-12-02T15:22:09,331 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testExportWithResetTtl,,1733152928666.ccbd18e92db6010118a2b8a17b87472d. 
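The "writing data to region ... with WAL disabled" warnings above are what a region server emits when a client sends mutations with Durability.SKIP_WAL. A minimal sketch of such a write, assuming a reachable cluster; the row key and value are placeholders, not the test's actual data:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Durability;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class SkipWalPutExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("testExportWithResetTtl"))) {
          Put put = new Put(Bytes.toBytes("row-0001"));
          put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value"));
          // Skipping the WAL is faster but unsafe: the region server logs the
          // "Data may be lost in the event of a crash" warning seen above.
          put.setDurability(Durability.SKIP_WAL);
          table.put(put);
        }
      }
    }
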
2024-12-02T15:22:09,331 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-02T15:22:09,333 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportWithResetTtl,, stopping at row=testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-12-02T15:22:09,338 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportWithResetTtl,, stopping at row=testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-12-02T15:22:09,344 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportWithResetTtl,, stopping at row=testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-12-02T15:22:09,347 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } 2024-12-02T15:22:09,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733152929347 (current time:1733152929347). 2024-12-02T15:22:09,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb-testExportWithResetTtl VERSION not specified, setting to 2 2024-12-02T15:22:09,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-02T15:22:09,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@76bf189a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T15:22:09,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] client.ClusterIdFetcher(90): Going to request be0458f36726,34441,-1 for getting cluster id 2024-12-02T15:22:09,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-02T15:22:09,349 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '8e50677f-f1c7-447a-8806-ffbc6b162076' 2024-12-02T15:22:09,349 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-02T15:22:09,349 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "8e50677f-f1c7-447a-8806-ffbc6b162076" 2024-12-02T15:22:09,349 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6efaba07, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T15:22:09,349 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [be0458f36726,34441,-1] 2024-12-02T15:22:09,350 DEBUG 
[MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-02T15:22:09,350 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T15:22:09,351 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45750, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-02T15:22:09,351 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2832f28a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T15:22:09,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-02T15:22:09,352 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=be0458f36726,45405,1733152773787, seqNum=-1] 2024-12-02T15:22:09,353 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T15:22:09,354 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40078, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T15:22:09,355 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441. 
2024-12-02T15:22:09,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-02T15:22:09,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T15:22:09,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T15:22:09,355 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-02T15:22:09,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1b3e5cc7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T15:22:09,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] client.ClusterIdFetcher(90): Going to request be0458f36726,34441,-1 for getting cluster id 2024-12-02T15:22:09,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-02T15:22:09,357 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '8e50677f-f1c7-447a-8806-ffbc6b162076' 2024-12-02T15:22:09,357 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-02T15:22:09,358 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "8e50677f-f1c7-447a-8806-ffbc6b162076" 2024-12-02T15:22:09,358 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@59fc5ce3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T15:22:09,358 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [be0458f36726,34441,-1] 2024-12-02T15:22:09,358 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-02T15:22:09,358 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T15:22:09,359 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45756, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-02T15:22:09,360 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@708ad29, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T15:22:09,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-02T15:22:09,362 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=be0458f36726,45405,1733152773787, seqNum=-1] 2024-12-02T15:22:09,362 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T15:22:09,363 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40086, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T15:22:09,365 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testExportWithResetTtl', locateType=CURRENT is [region=hbase:acl,,1733152776657.82c95700d9ff2efea2507bd84223c4b0., hostname=be0458f36726,43025,1733152773677, seqNum=2] 2024-12-02T15:22:09,365 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T15:22:09,366 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49860, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T15:22:09,368 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441. 
2024-12-02T15:22:09,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-02T15:22:09,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T15:22:09,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T15:22:09,368 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-02T15:22:09,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] access.PermissionStorage(613): Read acl: entry[testExportWithResetTtl], kv [jenkins: RWXCA] 2024-12-02T15:22:09,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
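The snapshot request handled above can be issued from a client with Admin.snapshot; the two-argument overload takes a FLUSH snapshot of an enabled table and blocks until the SnapshotProcedure (pid=79 below) completes. A minimal sketch, assuming a reachable cluster; the ttl=100000 seen in the request is an extra snapshot property the test attaches and is omitted here:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class TakeSnapshotExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Flush snapshot of an enabled table; returns once the master-side
          // snapshot procedure reports SUCCESS.
          admin.snapshot("snaptb-testExportWithResetTtl",
              TableName.valueOf("testExportWithResetTtl"));
        }
      }
    }
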
2024-12-02T15:22:09,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] procedure2.ProcedureExecutor(1139): Stored pid=79, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=79, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } 2024-12-02T15:22:09,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 }, snapshot procedure id = 79 2024-12-02T15:22:09,372 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=79, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=79, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_PREPARE 2024-12-02T15:22:09,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=79 2024-12-02T15:22:09,373 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=79, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=79, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-02T15:22:09,376 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=79, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=79, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-02T15:22:09,384 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073741974_1150 (size=143) 2024-12-02T15:22:09,384 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073741974_1150 (size=143) 2024-12-02T15:22:09,385 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073741974_1150 (size=143) 2024-12-02T15:22:09,386 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=79, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=79, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-02T15:22:09,386 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=80, ppid=79, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ccbd18e92db6010118a2b8a17b87472d}, {pid=81, ppid=79, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 3deadc203bd5ed12ca129719026b1ee7}] 2024-12-02T15:22:09,387 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=80, ppid=79, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ccbd18e92db6010118a2b8a17b87472d 2024-12-02T15:22:09,387 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=81, ppid=79, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 3deadc203bd5ed12ca129719026b1ee7 2024-12-02T15:22:09,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] master.MasterRpcServices(1377): 
Checking to see if procedure is done pid=79 2024-12-02T15:22:09,539 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45405 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=81 2024-12-02T15:22:09,539 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43025 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=80 2024-12-02T15:22:09,540 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testExportWithResetTtl,,1733152928666.ccbd18e92db6010118a2b8a17b87472d. 2024-12-02T15:22:09,540 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testExportWithResetTtl,1,1733152928666.3deadc203bd5ed12ca129719026b1ee7. 2024-12-02T15:22:09,540 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] regionserver.HRegion(2902): Flushing ccbd18e92db6010118a2b8a17b87472d 1/1 column families, dataSize=199 B heapSize=688 B 2024-12-02T15:22:09,540 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] regionserver.HRegion(2902): Flushing 3deadc203bd5ed12ca129719026b1ee7 1/1 column families, dataSize=3.06 KB heapSize=6.86 KB 2024-12-02T15:22:09,555 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testExportWithResetTtl/ccbd18e92db6010118a2b8a17b87472d/.tmp/cf/3ddb1b8c6e034f9b99603e43949ee161 is 71, key is 05e3c9c79418ed5a37975a684cd364cd/cf:q/1733152929325/Put/seqid=0 2024-12-02T15:22:09,555 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testExportWithResetTtl/3deadc203bd5ed12ca129719026b1ee7/.tmp/cf/7a2a0f87fbae4027b75d31fd5bb6b5b8 is 71, key is 11b2fb70fb86e470b3b38685968c1648/cf:q/1733152929327/Put/seqid=0 2024-12-02T15:22:09,566 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073741976_1152 (size=8324) 2024-12-02T15:22:09,567 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073741976_1152 (size=8324) 2024-12-02T15:22:09,567 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073741975_1151 (size=5288) 2024-12-02T15:22:09,567 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073741975_1151 (size=5288) 2024-12-02T15:22:09,567 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073741975_1151 (size=5288) 2024-12-02T15:22:09,567 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073741976_1152 (size=8324) 2024-12-02T15:22:09,568 INFO 
[RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=199 B at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testExportWithResetTtl/ccbd18e92db6010118a2b8a17b87472d/.tmp/cf/3ddb1b8c6e034f9b99603e43949ee161 2024-12-02T15:22:09,568 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.06 KB at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testExportWithResetTtl/3deadc203bd5ed12ca129719026b1ee7/.tmp/cf/7a2a0f87fbae4027b75d31fd5bb6b5b8 2024-12-02T15:22:09,574 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testExportWithResetTtl/ccbd18e92db6010118a2b8a17b87472d/.tmp/cf/3ddb1b8c6e034f9b99603e43949ee161 as hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testExportWithResetTtl/ccbd18e92db6010118a2b8a17b87472d/cf/3ddb1b8c6e034f9b99603e43949ee161 2024-12-02T15:22:09,574 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testExportWithResetTtl/3deadc203bd5ed12ca129719026b1ee7/.tmp/cf/7a2a0f87fbae4027b75d31fd5bb6b5b8 as hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testExportWithResetTtl/3deadc203bd5ed12ca129719026b1ee7/cf/7a2a0f87fbae4027b75d31fd5bb6b5b8 2024-12-02T15:22:09,579 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testExportWithResetTtl/ccbd18e92db6010118a2b8a17b87472d/cf/3ddb1b8c6e034f9b99603e43949ee161, entries=3, sequenceid=5, filesize=5.2 K 2024-12-02T15:22:09,580 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testExportWithResetTtl/3deadc203bd5ed12ca129719026b1ee7/cf/7a2a0f87fbae4027b75d31fd5bb6b5b8, entries=47, sequenceid=5, filesize=8.1 K 2024-12-02T15:22:09,580 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] regionserver.HRegion(3140): Finished flush of dataSize ~199 B/199, heapSize ~672 B/672, currentSize=0 B/0 for ccbd18e92db6010118a2b8a17b87472d in 40ms, sequenceid=5, compaction requested=false 2024-12-02T15:22:09,580 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testExportWithResetTtl' 2024-12-02T15:22:09,580 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] regionserver.HRegion(3140): Finished flush of dataSize ~3.06 KB/3137, heapSize ~6.84 KB/7008, currentSize=0 B/0 for 
3deadc203bd5ed12ca129719026b1ee7 in 40ms, sequenceid=5, compaction requested=false 2024-12-02T15:22:09,580 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testExportWithResetTtl' 2024-12-02T15:22:09,581 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] regionserver.HRegion(2603): Flush status journal for 3deadc203bd5ed12ca129719026b1ee7: 2024-12-02T15:22:09,581 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] regionserver.HRegion(2603): Flush status journal for ccbd18e92db6010118a2b8a17b87472d: 2024-12-02T15:22:09,581 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] regionserver.SnapshotRegionCallable(75): Snapshotting region testExportWithResetTtl,1,1733152928666.3deadc203bd5ed12ca129719026b1ee7. for snaptb-testExportWithResetTtl completed. 2024-12-02T15:22:09,581 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] regionserver.SnapshotRegionCallable(75): Snapshotting region testExportWithResetTtl,,1733152928666.ccbd18e92db6010118a2b8a17b87472d. for snaptb-testExportWithResetTtl completed. 2024-12-02T15:22:09,581 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] snapshot.SnapshotManifest(241): Storing 'testExportWithResetTtl,1,1733152928666.3deadc203bd5ed12ca129719026b1ee7.' region-info for snapshot=snaptb-testExportWithResetTtl 2024-12-02T15:22:09,581 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] snapshot.SnapshotManifest(241): Storing 'testExportWithResetTtl,,1733152928666.ccbd18e92db6010118a2b8a17b87472d.' 
region-info for snapshot=snaptb-testExportWithResetTtl 2024-12-02T15:22:09,581 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-02T15:22:09,581 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-02T15:22:09,581 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testExportWithResetTtl/3deadc203bd5ed12ca129719026b1ee7/cf/7a2a0f87fbae4027b75d31fd5bb6b5b8] hfiles 2024-12-02T15:22:09,581 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testExportWithResetTtl/ccbd18e92db6010118a2b8a17b87472d/cf/3ddb1b8c6e034f9b99603e43949ee161] hfiles 2024-12-02T15:22:09,581 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testExportWithResetTtl/3deadc203bd5ed12ca129719026b1ee7/cf/7a2a0f87fbae4027b75d31fd5bb6b5b8 for snapshot=snaptb-testExportWithResetTtl 2024-12-02T15:22:09,581 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testExportWithResetTtl/ccbd18e92db6010118a2b8a17b87472d/cf/3ddb1b8c6e034f9b99603e43949ee161 for snapshot=snaptb-testExportWithResetTtl 2024-12-02T15:22:09,589 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073741977_1153 (size=100) 2024-12-02T15:22:09,589 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073741977_1153 (size=100) 2024-12-02T15:22:09,589 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073741977_1153 (size=100) 2024-12-02T15:22:09,589 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testExportWithResetTtl,1,1733152928666.3deadc203bd5ed12ca129719026b1ee7. 
2024-12-02T15:22:09,590 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=81 2024-12-02T15:22:09,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] master.HMaster(4169): Remote procedure done, pid=81 2024-12-02T15:22:09,590 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb-testExportWithResetTtl on region 3deadc203bd5ed12ca129719026b1ee7 2024-12-02T15:22:09,590 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=81, ppid=79, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 3deadc203bd5ed12ca129719026b1ee7 2024-12-02T15:22:09,593 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=81, ppid=79, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 3deadc203bd5ed12ca129719026b1ee7 in 205 msec 2024-12-02T15:22:09,607 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073741978_1154 (size=100) 2024-12-02T15:22:09,607 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073741978_1154 (size=100) 2024-12-02T15:22:09,608 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073741978_1154 (size=100) 2024-12-02T15:22:09,608 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testExportWithResetTtl,,1733152928666.ccbd18e92db6010118a2b8a17b87472d. 
2024-12-02T15:22:09,608 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=80 2024-12-02T15:22:09,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] master.HMaster(4169): Remote procedure done, pid=80 2024-12-02T15:22:09,609 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb-testExportWithResetTtl on region ccbd18e92db6010118a2b8a17b87472d 2024-12-02T15:22:09,609 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=80, ppid=79, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ccbd18e92db6010118a2b8a17b87472d 2024-12-02T15:22:09,613 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=80, resume processing ppid=79 2024-12-02T15:22:09,613 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=80, ppid=79, state=SUCCESS, hasLock=false; SnapshotRegionProcedure ccbd18e92db6010118a2b8a17b87472d in 224 msec 2024-12-02T15:22:09,613 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=79, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=79, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-02T15:22:09,614 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=79, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=79, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-02T15:22:09,615 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=79, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=79, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-02T15:22:09,615 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb-testExportWithResetTtl 2024-12-02T15:22:09,616 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/.hbase-snapshot/.tmp/snaptb-testExportWithResetTtl 2024-12-02T15:22:09,635 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073741979_1155 (size=600) 2024-12-02T15:22:09,636 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073741979_1155 (size=600) 2024-12-02T15:22:09,638 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073741979_1155 (size=600) 2024-12-02T15:22:09,640 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=79, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=79, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-02T15:22:09,648 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=79, 
state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=79, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-02T15:22:09,649 DEBUG [PEWorker-1 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/.hbase-snapshot/.tmp/snaptb-testExportWithResetTtl to hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/.hbase-snapshot/snaptb-testExportWithResetTtl 2024-12-02T15:22:09,651 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=79, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=79, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_POST_OPERATION 2024-12-02T15:22:09,651 DEBUG [PEWorker-1 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 }, snapshot procedure id = 79 2024-12-02T15:22:09,652 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=79, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=79, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } in 282 msec 2024-12-02T15:22:09,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=79 2024-12-02T15:22:09,693 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testExportWithResetTtl completed 2024-12-02T15:22:09,705 INFO [Time-limited test {}] snapshot.TestExportSnapshot(515): HDFS export destination path: hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/export-test/export-1733152929704 2024-12-02T15:22:09,705 INFO [Time-limited test {}] snapshot.TestExportSnapshot(542): tgtFsUri=hdfs://localhost:36329, tgtDir=hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/export-test/export-1733152929704, rawTgtDir=hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/export-test/export-1733152929704, srcFsUri=hdfs://localhost:36329, srcDir=hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657 2024-12-02T15:22:09,735 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1094): inputFs=hdfs://localhost:36329, inputRoot=hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657 2024-12-02T15:22:09,735 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1095): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-205930424_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/export-test/export-1733152929704, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/export-test/export-1733152929704/.hbase-snapshot/.tmp/snaptb-testExportWithResetTtl 2024-12-02T15:22:09,737 INFO [Time-limited test {}] snapshot.ExportSnapshot(1104): Verify the source snapshot's expiration status and integrity. 
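The export being set up above is performed by org.apache.hadoop.hbase.snapshot.ExportSnapshot, which can be driven as a Hadoop Tool. A minimal sketch, assuming ToolRunner and a placeholder destination path (the real test writes under the export-test directory shown in the log); the -snapshot and -copy-to arguments mirror the snapshot name and target directory logged here:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
    import org.apache.hadoop.util.ToolRunner;

    public class ExportSnapshotExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Copies the snapshot manifest and referenced hfiles to the target
        // filesystem, as the "Copy Snapshot Manifest" lines below describe.
        int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
            "-snapshot", "snaptb-testExportWithResetTtl",
            "-copy-to", "hdfs://localhost:36329/user/jenkins/export-test/export-dest"
        });
        System.exit(rc);
      }
    }
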
2024-12-02T15:22:09,741 INFO [Time-limited test {}] snapshot.ExportSnapshot(1162): Copy Snapshot Manifest from hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/.hbase-snapshot/snaptb-testExportWithResetTtl to hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/export-test/export-1733152929704/.hbase-snapshot/.tmp/snaptb-testExportWithResetTtl 2024-12-02T15:22:09,753 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073741981_1157 (size=600) 2024-12-02T15:22:09,753 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073741980_1156 (size=143) 2024-12-02T15:22:09,754 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073741980_1156 (size=143) 2024-12-02T15:22:09,754 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073741980_1156 (size=143) 2024-12-02T15:22:09,754 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073741981_1157 (size=600) 2024-12-02T15:22:09,754 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073741981_1157 (size=600) 2024-12-02T15:22:09,763 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073741982_1158 (size=141) 2024-12-02T15:22:09,767 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073741982_1158 (size=141) 2024-12-02T15:22:09,767 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073741982_1158 (size=141) 2024-12-02T15:22:09,769 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-common/target/hbase-common-3.0.0-beta-2-SNAPSHOT.jar 2024-12-02T15:22:09,769 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-protocol-shaded/target/hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar 2024-12-02T15:22:09,770 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-client/target/hbase-client-3.0.0-beta-2-SNAPSHOT.jar 2024-12-02T15:22:10,626 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/44e720fa-d04f-d823-6564-55fab94a4e60/hadoop-8136999301885212085.jar 2024-12-02T15:22:10,627 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-hadoop-compat/target/hbase-hadoop-compat-3.0.0-beta-2-SNAPSHOT.jar 2024-12-02T15:22:10,627 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class 
org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-hadoop-compat/target/hbase-hadoop-compat-3.0.0-beta-2-SNAPSHOT.jar 2024-12-02T15:22:10,706 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/44e720fa-d04f-d823-6564-55fab94a4e60/hadoop-8055571795447625696.jar 2024-12-02T15:22:10,706 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-metrics/target/hbase-metrics-3.0.0-beta-2-SNAPSHOT.jar 2024-12-02T15:22:10,707 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-metrics-api/target/hbase-metrics-api-3.0.0-beta-2-SNAPSHOT.jar 2024-12-02T15:22:10,707 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-replication/target/hbase-replication-3.0.0-beta-2-SNAPSHOT.jar 2024-12-02T15:22:10,708 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-http/target/hbase-http-3.0.0-beta-2-SNAPSHOT.jar 2024-12-02T15:22:10,708 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-procedure/target/hbase-procedure-3.0.0-beta-2-SNAPSHOT.jar 2024-12-02T15:22:10,709 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-zookeeper/target/hbase-zookeeper-3.0.0-beta-2-SNAPSHOT.jar 2024-12-02T15:22:10,709 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-02T15:22:10,709 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-02T15:22:10,710 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-02T15:22:10,710 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-02T15:22:10,711 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-02T15:22:10,711 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-02T15:22:10,711 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-02T15:22:10,712 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-02T15:22:10,712 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-02T15:22:10,712 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-02T15:22:10,712 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-02T15:22:10,713 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-02T15:22:10,713 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-02T15:22:10,713 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-02T15:22:10,714 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-02T15:22:10,714 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class 
org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-02T15:22:10,714 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-02T15:22:10,715 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-02T15:22:10,778 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073741983_1159 (size=24020) 2024-12-02T15:22:10,778 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073741983_1159 (size=24020) 2024-12-02T15:22:10,779 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073741983_1159 (size=24020) 2024-12-02T15:22:10,788 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073741984_1160 (size=77755) 2024-12-02T15:22:10,788 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073741984_1160 (size=77755) 2024-12-02T15:22:10,792 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073741984_1160 (size=77755) 2024-12-02T15:22:10,824 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073741985_1161 (size=131360) 2024-12-02T15:22:10,825 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073741985_1161 (size=131360) 2024-12-02T15:22:10,826 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073741985_1161 (size=131360) 2024-12-02T15:22:10,856 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073741986_1162 (size=111793) 2024-12-02T15:22:10,857 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073741986_1162 (size=111793) 2024-12-02T15:22:10,857 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073741986_1162 (size=111793) 2024-12-02T15:22:10,878 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073741987_1163 (size=1832290) 2024-12-02T15:22:10,878 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073741987_1163 (size=1832290) 2024-12-02T15:22:10,878 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073741987_1163 (size=1832290) 2024-12-02T15:22:10,911 INFO 
[Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073741988_1164 (size=8360005) 2024-12-02T15:22:10,912 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073741988_1164 (size=8360005) 2024-12-02T15:22:10,913 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073741988_1164 (size=8360005) 2024-12-02T15:22:10,925 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073741989_1165 (size=503880) 2024-12-02T15:22:10,926 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073741989_1165 (size=503880) 2024-12-02T15:22:10,927 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073741989_1165 (size=503880) 2024-12-02T15:22:10,950 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073741990_1166 (size=6424745) 2024-12-02T15:22:10,950 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073741990_1166 (size=6424745) 2024-12-02T15:22:10,950 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073741990_1166 (size=6424745) 2024-12-02T15:22:10,959 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073741991_1167 (size=322274) 2024-12-02T15:22:10,959 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073741991_1167 (size=322274) 2024-12-02T15:22:10,959 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073741991_1167 (size=322274) 2024-12-02T15:22:10,969 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073741992_1168 (size=20406) 2024-12-02T15:22:10,970 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073741992_1168 (size=20406) 2024-12-02T15:22:10,970 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073741992_1168 (size=20406) 2024-12-02T15:22:10,984 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073741993_1169 (size=45609) 2024-12-02T15:22:10,985 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073741993_1169 (size=45609) 2024-12-02T15:22:10,985 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073741993_1169 (size=45609) 2024-12-02T15:22:10,996 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073741994_1170 (size=136454) 2024-12-02T15:22:10,997 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073741994_1170 (size=136454) 2024-12-02T15:22:10,997 
INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073741994_1170 (size=136454) 2024-12-02T15:22:11,030 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073741995_1171 (size=1597136) 2024-12-02T15:22:11,030 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073741995_1171 (size=1597136) 2024-12-02T15:22:11,031 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073741995_1171 (size=1597136) 2024-12-02T15:22:11,060 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073741996_1172 (size=30873) 2024-12-02T15:22:11,060 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073741996_1172 (size=30873) 2024-12-02T15:22:11,064 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073741996_1172 (size=30873) 2024-12-02T15:22:11,088 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073741997_1173 (size=29229) 2024-12-02T15:22:11,089 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073741997_1173 (size=29229) 2024-12-02T15:22:11,089 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073741997_1173 (size=29229) 2024-12-02T15:22:11,109 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733152780087_0002_000001 (auth:SIMPLE) from 127.0.0.1:41870 2024-12-02T15:22:11,129 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_126270/yarn-6841854118/MiniMRCluster_126270-localDir-nm-0_1/usercache/jenkins/appcache/application_1733152780087_0002/container_1733152780087_0002_01_000001/launch_container.sh] 2024-12-02T15:22:11,129 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_126270/yarn-6841854118/MiniMRCluster_126270-localDir-nm-0_1/usercache/jenkins/appcache/application_1733152780087_0002/container_1733152780087_0002_01_000001/container_tokens] 2024-12-02T15:22:11,129 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_126270/yarn-6841854118/MiniMRCluster_126270-localDir-nm-0_1/usercache/jenkins/appcache/application_1733152780087_0002/container_1733152780087_0002_01_000001/sysfs] 2024-12-02T15:22:11,502 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073741998_1174 (size=903849) 2024-12-02T15:22:11,502 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073741998_1174 (size=903849) 
2024-12-02T15:22:11,502 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073741998_1174 (size=903849) 2024-12-02T15:22:11,516 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073741999_1175 (size=443171) 2024-12-02T15:22:11,516 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073741999_1175 (size=443171) 2024-12-02T15:22:11,516 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073741999_1175 (size=443171) 2024-12-02T15:22:11,534 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742000_1176 (size=5175431) 2024-12-02T15:22:11,534 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742000_1176 (size=5175431) 2024-12-02T15:22:11,534 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742000_1176 (size=5175431) 2024-12-02T15:22:11,544 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742001_1177 (size=232881) 2024-12-02T15:22:11,544 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742001_1177 (size=232881) 2024-12-02T15:22:11,544 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742001_1177 (size=232881) 2024-12-02T15:22:11,556 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742002_1178 (size=1323991) 2024-12-02T15:22:11,556 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742002_1178 (size=1323991) 2024-12-02T15:22:11,557 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742002_1178 (size=1323991) 2024-12-02T15:22:11,596 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742003_1179 (size=4695811) 2024-12-02T15:22:11,596 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742003_1179 (size=4695811) 2024-12-02T15:22:11,596 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742003_1179 (size=4695811) 2024-12-02T15:22:11,617 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742004_1180 (size=1877034) 2024-12-02T15:22:11,617 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742004_1180 (size=1877034) 2024-12-02T15:22:11,617 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742004_1180 (size=1877034) 2024-12-02T15:22:11,632 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to 
blk_1073742005_1181 (size=217555) 2024-12-02T15:22:11,632 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742005_1181 (size=217555) 2024-12-02T15:22:11,632 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742005_1181 (size=217555) 2024-12-02T15:22:11,655 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742006_1182 (size=4188619) 2024-12-02T15:22:11,655 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742006_1182 (size=4188619) 2024-12-02T15:22:11,655 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742006_1182 (size=4188619) 2024-12-02T15:22:11,682 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742007_1183 (size=127628) 2024-12-02T15:22:11,682 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742007_1183 (size=127628) 2024-12-02T15:22:11,683 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742007_1183 (size=127628) 2024-12-02T15:22:11,686 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 2024-12-02T15:22:11,689 INFO [Time-limited test {}] snapshot.ExportSnapshot(663): Loading Snapshot 'snaptb-testExportWithResetTtl' hfile list 2024-12-02T15:22:11,693 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=0 size=8.1 K 2024-12-02T15:22:11,693 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=1 size=5.2 K 2024-12-02T15:22:11,728 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742008_1184 (size=427) 2024-12-02T15:22:11,729 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742008_1184 (size=427) 2024-12-02T15:22:11,730 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742008_1184 (size=427) 2024-12-02T15:22:11,747 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742009_1185 (size=21) 2024-12-02T15:22:11,748 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742009_1185 (size=21) 2024-12-02T15:22:11,750 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742009_1185 (size=21) 2024-12-02T15:22:11,778 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742010_1186 (size=304075) 2024-12-02T15:22:11,779 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742010_1186 (size=304075) 2024-12-02T15:22:11,779 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to 
blk_1073742010_1186 (size=304075) 2024-12-02T15:22:11,802 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-02T15:22:11,802 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-02T15:22:12,110 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733152780087_0003_000001 (auth:SIMPLE) from 127.0.0.1:41884 2024-12-02T15:22:12,514 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-02T15:22:13,093 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testExportWithResetTtl 2024-12-02T15:22:13,093 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testExportWithResetTtl Metrics about Tables on a single HBase RegionServer 2024-12-02T15:22:13,094 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithResetTtl 2024-12-02T15:22:13,094 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithResetTtl Metrics about Tables on a single HBase RegionServer 2024-12-02T15:22:13,095 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithTargetName 2024-12-02T15:22:16,911 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733152780087_0003_000001 (auth:SIMPLE) from 127.0.0.1:35438 2024-12-02T15:22:17,239 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742011_1187 (size=349773) 2024-12-02T15:22:17,240 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742011_1187 (size=349773) 2024-12-02T15:22:17,240 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742011_1187 (size=349773) 2024-12-02T15:22:18,598 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-02T15:22:19,505 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733152780087_0003_000001 (auth:SIMPLE) from 127.0.0.1:36194 2024-12-02T15:22:19,507 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733152780087_0003_000001 (auth:SIMPLE) from 127.0.0.1:49290 2024-12-02T15:22:25,214 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742012_1188 (size=8324) 
2024-12-02T15:22:25,214 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742012_1188 (size=8324) 2024-12-02T15:22:25,218 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742012_1188 (size=8324) 2024-12-02T15:22:26,535 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742014_1190 (size=5288) 2024-12-02T15:22:26,535 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742014_1190 (size=5288) 2024-12-02T15:22:26,535 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742014_1190 (size=5288) 2024-12-02T15:22:26,594 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742013_1189 (size=22124) 2024-12-02T15:22:26,594 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742013_1189 (size=22124) 2024-12-02T15:22:26,595 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742013_1189 (size=22124) 2024-12-02T15:22:26,621 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742015_1191 (size=462) 2024-12-02T15:22:26,621 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742015_1191 (size=462) 2024-12-02T15:22:26,622 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742015_1191 (size=462) 2024-12-02T15:22:26,674 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_126270/yarn-6841854118/MiniMRCluster_126270-localDir-nm-1_3/usercache/jenkins/appcache/application_1733152780087_0003/container_1733152780087_0003_01_000003/launch_container.sh] 2024-12-02T15:22:26,675 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_126270/yarn-6841854118/MiniMRCluster_126270-localDir-nm-1_3/usercache/jenkins/appcache/application_1733152780087_0003/container_1733152780087_0003_01_000003/container_tokens] 2024-12-02T15:22:26,675 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_126270/yarn-6841854118/MiniMRCluster_126270-localDir-nm-1_3/usercache/jenkins/appcache/application_1733152780087_0003/container_1733152780087_0003_01_000003/sysfs] 2024-12-02T15:22:26,675 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742016_1192 (size=22124) 2024-12-02T15:22:26,675 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742016_1192 (size=22124) 
2024-12-02T15:22:26,675 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742016_1192 (size=22124) 2024-12-02T15:22:26,699 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742017_1193 (size=349773) 2024-12-02T15:22:26,700 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742017_1193 (size=349773) 2024-12-02T15:22:26,700 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742017_1193 (size=349773) 2024-12-02T15:22:26,715 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733152780087_0003_000001 (auth:SIMPLE) from 127.0.0.1:37756 2024-12-02T15:22:27,985 INFO [Time-limited test {}] snapshot.ExportSnapshot(1219): Finalize the Snapshot Export 2024-12-02T15:22:27,987 INFO [Time-limited test {}] snapshot.ExportSnapshot(1230): Verify the exported snapshot's expiration status and integrity. 2024-12-02T15:22:27,992 INFO [Time-limited test {}] snapshot.ExportSnapshot(1236): Export Completed: snaptb-testExportWithResetTtl 2024-12-02T15:22:27,992 INFO [Time-limited test {}] snapshot.TestExportSnapshot(409): Exported snapshot 2024-12-02T15:22:27,993 INFO [Time-limited test {}] snapshot.TestExportSnapshot(420): Verified filesystem state 2024-12-02T15:22:27,993 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-205930424_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/.hbase-snapshot/snaptb-testExportWithResetTtl at hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/.hbase-snapshot/snaptb-testExportWithResetTtl 2024-12-02T15:22:27,994 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/.hbase-snapshot/snaptb-testExportWithResetTtl/.snapshotinfo 2024-12-02T15:22:27,994 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/.hbase-snapshot/snaptb-testExportWithResetTtl/data.manifest 2024-12-02T15:22:27,994 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-205930424_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/export-test/export-1733152929704/.hbase-snapshot/snaptb-testExportWithResetTtl at hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/export-test/export-1733152929704/.hbase-snapshot/snaptb-testExportWithResetTtl 2024-12-02T15:22:27,994 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/export-test/export-1733152929704/.hbase-snapshot/snaptb-testExportWithResetTtl/.snapshotinfo 2024-12-02T15:22:27,994 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/export-test/export-1733152929704/.hbase-snapshot/snaptb-testExportWithResetTtl/data.manifest 2024-12-02T15:22:28,002 INFO 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testExportWithResetTtl 2024-12-02T15:22:28,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] procedure2.ProcedureExecutor(1139): Stored pid=82, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testExportWithResetTtl 2024-12-02T15:22:28,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=82 2024-12-02T15:22:28,005 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733152948005"}]},"ts":"1733152948005"} 2024-12-02T15:22:28,007 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportWithResetTtl, state=DISABLING in hbase:meta 2024-12-02T15:22:28,007 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(284): Set testExportWithResetTtl to state=DISABLING 2024-12-02T15:22:28,008 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=83, ppid=82, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testExportWithResetTtl}] 2024-12-02T15:22:28,009 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=84, ppid=83, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=ccbd18e92db6010118a2b8a17b87472d, UNASSIGN}, {pid=85, ppid=83, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=3deadc203bd5ed12ca129719026b1ee7, UNASSIGN}] 2024-12-02T15:22:28,010 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=85, ppid=83, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=3deadc203bd5ed12ca129719026b1ee7, UNASSIGN 2024-12-02T15:22:28,010 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=84, ppid=83, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=ccbd18e92db6010118a2b8a17b87472d, UNASSIGN 2024-12-02T15:22:28,011 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=85 updating hbase:meta row=3deadc203bd5ed12ca129719026b1ee7, regionState=CLOSING, regionLocation=be0458f36726,45405,1733152773787 2024-12-02T15:22:28,012 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=84 updating hbase:meta row=ccbd18e92db6010118a2b8a17b87472d, regionState=CLOSING, regionLocation=be0458f36726,43025,1733152773677 2024-12-02T15:22:28,012 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=85, ppid=83, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testExportWithResetTtl, region=3deadc203bd5ed12ca129719026b1ee7, UNASSIGN because future has completed 2024-12-02T15:22:28,013 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-02T15:22:28,013 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=86, ppid=85, state=RUNNABLE, hasLock=false; CloseRegionProcedure 3deadc203bd5ed12ca129719026b1ee7, 
server=be0458f36726,45405,1733152773787}] 2024-12-02T15:22:28,013 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=84, ppid=83, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testExportWithResetTtl, region=ccbd18e92db6010118a2b8a17b87472d, UNASSIGN because future has completed 2024-12-02T15:22:28,013 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-02T15:22:28,013 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=87, ppid=84, state=RUNNABLE, hasLock=false; CloseRegionProcedure ccbd18e92db6010118a2b8a17b87472d, server=be0458f36726,43025,1733152773677}] 2024-12-02T15:22:28,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=82 2024-12-02T15:22:28,165 INFO [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] handler.UnassignRegionHandler(122): Close 3deadc203bd5ed12ca129719026b1ee7 2024-12-02T15:22:28,165 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-02T15:22:28,165 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.HRegion(1722): Closing 3deadc203bd5ed12ca129719026b1ee7, disabling compactions & flushes 2024-12-02T15:22:28,165 INFO [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.HRegion(1755): Closing region testExportWithResetTtl,1,1733152928666.3deadc203bd5ed12ca129719026b1ee7. 2024-12-02T15:22:28,165 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.HRegion(1776): Time limited wait for close lock on testExportWithResetTtl,1,1733152928666.3deadc203bd5ed12ca129719026b1ee7. 2024-12-02T15:22:28,165 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.HRegion(1843): Acquired close lock on testExportWithResetTtl,1,1733152928666.3deadc203bd5ed12ca129719026b1ee7. after waiting 0 ms 2024-12-02T15:22:28,165 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.HRegion(1853): Updates disabled for region testExportWithResetTtl,1,1733152928666.3deadc203bd5ed12ca129719026b1ee7. 2024-12-02T15:22:28,166 INFO [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=87}] handler.UnassignRegionHandler(122): Close ccbd18e92db6010118a2b8a17b87472d 2024-12-02T15:22:28,166 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=87}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-02T15:22:28,166 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=87}] regionserver.HRegion(1722): Closing ccbd18e92db6010118a2b8a17b87472d, disabling compactions & flushes 2024-12-02T15:22:28,166 INFO [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=87}] regionserver.HRegion(1755): Closing region testExportWithResetTtl,,1733152928666.ccbd18e92db6010118a2b8a17b87472d. 
2024-12-02T15:22:28,166 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=87}] regionserver.HRegion(1776): Time limited wait for close lock on testExportWithResetTtl,,1733152928666.ccbd18e92db6010118a2b8a17b87472d. 2024-12-02T15:22:28,166 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=87}] regionserver.HRegion(1843): Acquired close lock on testExportWithResetTtl,,1733152928666.ccbd18e92db6010118a2b8a17b87472d. after waiting 0 ms 2024-12-02T15:22:28,166 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=87}] regionserver.HRegion(1853): Updates disabled for region testExportWithResetTtl,,1733152928666.ccbd18e92db6010118a2b8a17b87472d. 2024-12-02T15:22:28,170 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testExportWithResetTtl/3deadc203bd5ed12ca129719026b1ee7/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-12-02T15:22:28,171 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-02T15:22:28,171 INFO [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.HRegion(1973): Closed testExportWithResetTtl,1,1733152928666.3deadc203bd5ed12ca129719026b1ee7. 2024-12-02T15:22:28,171 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.HRegion(1676): Region close journal for 3deadc203bd5ed12ca129719026b1ee7: Waiting for close lock at 1733152948165Running coprocessor pre-close hooks at 1733152948165Disabling compacts and flushes for region at 1733152948165Disabling writes for close at 1733152948165Writing region close event to WAL at 1733152948166 (+1 ms)Running coprocessor post-close hooks at 1733152948171 (+5 ms)Closed at 1733152948171 2024-12-02T15:22:28,173 INFO [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] handler.UnassignRegionHandler(157): Closed 3deadc203bd5ed12ca129719026b1ee7 2024-12-02T15:22:28,174 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=85 updating hbase:meta row=3deadc203bd5ed12ca129719026b1ee7, regionState=CLOSED 2024-12-02T15:22:28,176 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=87}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testExportWithResetTtl/ccbd18e92db6010118a2b8a17b87472d/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-12-02T15:22:28,177 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=87}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-02T15:22:28,177 INFO [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=87}] regionserver.HRegion(1973): Closed testExportWithResetTtl,,1733152928666.ccbd18e92db6010118a2b8a17b87472d. 
2024-12-02T15:22:28,177 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=87}] regionserver.HRegion(1676): Region close journal for ccbd18e92db6010118a2b8a17b87472d: Waiting for close lock at 1733152948166Running coprocessor pre-close hooks at 1733152948166Disabling compacts and flushes for region at 1733152948166Disabling writes for close at 1733152948166Writing region close event to WAL at 1733152948167 (+1 ms)Running coprocessor post-close hooks at 1733152948177 (+10 ms)Closed at 1733152948177 2024-12-02T15:22:28,179 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=86, ppid=85, state=RUNNABLE, hasLock=false; CloseRegionProcedure 3deadc203bd5ed12ca129719026b1ee7, server=be0458f36726,45405,1733152773787 because future has completed 2024-12-02T15:22:28,180 INFO [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=87}] handler.UnassignRegionHandler(157): Closed ccbd18e92db6010118a2b8a17b87472d 2024-12-02T15:22:28,180 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=84 updating hbase:meta row=ccbd18e92db6010118a2b8a17b87472d, regionState=CLOSED 2024-12-02T15:22:28,182 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=87, ppid=84, state=RUNNABLE, hasLock=false; CloseRegionProcedure ccbd18e92db6010118a2b8a17b87472d, server=be0458f36726,43025,1733152773677 because future has completed 2024-12-02T15:22:28,183 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=86, resume processing ppid=85 2024-12-02T15:22:28,184 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=86, ppid=85, state=SUCCESS, hasLock=false; CloseRegionProcedure 3deadc203bd5ed12ca129719026b1ee7, server=be0458f36726,45405,1733152773787 in 167 msec 2024-12-02T15:22:28,187 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=87, resume processing ppid=84 2024-12-02T15:22:28,187 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=85, ppid=83, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=3deadc203bd5ed12ca129719026b1ee7, UNASSIGN in 175 msec 2024-12-02T15:22:28,188 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=87, ppid=84, state=SUCCESS, hasLock=false; CloseRegionProcedure ccbd18e92db6010118a2b8a17b87472d, server=be0458f36726,43025,1733152773677 in 170 msec 2024-12-02T15:22:28,189 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=84, resume processing ppid=83 2024-12-02T15:22:28,189 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=84, ppid=83, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=ccbd18e92db6010118a2b8a17b87472d, UNASSIGN in 178 msec 2024-12-02T15:22:28,191 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=83, resume processing ppid=82 2024-12-02T15:22:28,191 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=83, ppid=82, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testExportWithResetTtl in 182 msec 2024-12-02T15:22:28,193 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733152948193"}]},"ts":"1733152948193"} 2024-12-02T15:22:28,195 
INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportWithResetTtl, state=DISABLED in hbase:meta 2024-12-02T15:22:28,195 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(296): Set testExportWithResetTtl to state=DISABLED 2024-12-02T15:22:28,199 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=82, state=SUCCESS, hasLock=false; DisableTableProcedure table=testExportWithResetTtl in 195 msec 2024-12-02T15:22:28,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=82 2024-12-02T15:22:28,324 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testExportWithResetTtl completed 2024-12-02T15:22:28,324 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testExportWithResetTtl 2024-12-02T15:22:28,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] procedure2.ProcedureExecutor(1139): Stored pid=88, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testExportWithResetTtl 2024-12-02T15:22:28,327 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=88, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testExportWithResetTtl 2024-12-02T15:22:28,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] access.PermissionStorage(261): Removing permissions of removed table testExportWithResetTtl 2024-12-02T15:22:28,329 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=88, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testExportWithResetTtl 2024-12-02T15:22:28,333 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43025 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testExportWithResetTtl 2024-12-02T15:22:28,335 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testExportWithResetTtl/ccbd18e92db6010118a2b8a17b87472d 2024-12-02T15:22:28,335 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testExportWithResetTtl/3deadc203bd5ed12ca129719026b1ee7 2024-12-02T15:22:28,337 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testExportWithResetTtl/3deadc203bd5ed12ca129719026b1ee7/cf, FileablePath, hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testExportWithResetTtl/3deadc203bd5ed12ca129719026b1ee7/recovered.edits] 2024-12-02T15:22:28,337 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testExportWithResetTtl/ccbd18e92db6010118a2b8a17b87472d/cf, FileablePath, hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testExportWithResetTtl/ccbd18e92db6010118a2b8a17b87472d/recovered.edits] 2024-12-02T15:22:28,341 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testExportWithResetTtl/ccbd18e92db6010118a2b8a17b87472d/cf/3ddb1b8c6e034f9b99603e43949ee161 to hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/archive/data/default/testExportWithResetTtl/ccbd18e92db6010118a2b8a17b87472d/cf/3ddb1b8c6e034f9b99603e43949ee161 2024-12-02T15:22:28,341 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testExportWithResetTtl/3deadc203bd5ed12ca129719026b1ee7/cf/7a2a0f87fbae4027b75d31fd5bb6b5b8 to hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/archive/data/default/testExportWithResetTtl/3deadc203bd5ed12ca129719026b1ee7/cf/7a2a0f87fbae4027b75d31fd5bb6b5b8 2024-12-02T15:22:28,349 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testExportWithResetTtl/3deadc203bd5ed12ca129719026b1ee7/recovered.edits/8.seqid to hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/archive/data/default/testExportWithResetTtl/3deadc203bd5ed12ca129719026b1ee7/recovered.edits/8.seqid 2024-12-02T15:22:28,349 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testExportWithResetTtl/ccbd18e92db6010118a2b8a17b87472d/recovered.edits/8.seqid to hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/archive/data/default/testExportWithResetTtl/ccbd18e92db6010118a2b8a17b87472d/recovered.edits/8.seqid 2024-12-02T15:22:28,349 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testExportWithResetTtl/ccbd18e92db6010118a2b8a17b87472d 2024-12-02T15:22:28,349 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testExportWithResetTtl/3deadc203bd5ed12ca129719026b1ee7 2024-12-02T15:22:28,350 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(313): Archived testExportWithResetTtl regions 2024-12-02T15:22:28,352 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=88, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testExportWithResetTtl 2024-12-02T15:22:28,355 WARN [PEWorker-4 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testExportWithResetTtl from hbase:meta 2024-12-02T15:22:28,358 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(407): Removing 'testExportWithResetTtl' descriptor. 2024-12-02T15:22:28,359 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=88, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testExportWithResetTtl 2024-12-02T15:22:28,359 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(397): Removing 'testExportWithResetTtl' from region states. 
2024-12-02T15:22:28,359 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testExportWithResetTtl,,1733152928666.ccbd18e92db6010118a2b8a17b87472d.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733152948359"}]},"ts":"9223372036854775807"} 2024-12-02T15:22:28,359 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testExportWithResetTtl,1,1733152928666.3deadc203bd5ed12ca129719026b1ee7.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733152948359"}]},"ts":"9223372036854775807"} 2024-12-02T15:22:28,362 INFO [PEWorker-4 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-12-02T15:22:28,362 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => ccbd18e92db6010118a2b8a17b87472d, NAME => 'testExportWithResetTtl,,1733152928666.ccbd18e92db6010118a2b8a17b87472d.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 3deadc203bd5ed12ca129719026b1ee7, NAME => 'testExportWithResetTtl,1,1733152928666.3deadc203bd5ed12ca129719026b1ee7.', STARTKEY => '1', ENDKEY => ''}] 2024-12-02T15:22:28,362 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(401): Marking 'testExportWithResetTtl' as deleted. 2024-12-02T15:22:28,362 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733152948362"}]},"ts":"9223372036854775807"} 2024-12-02T15:22:28,364 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(867): Deleted table testExportWithResetTtl state from META 2024-12-02T15:22:28,364 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(133): Finished pid=88, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testExportWithResetTtl 2024-12-02T15:22:28,365 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=88, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testExportWithResetTtl in 39 msec 2024-12-02T15:22:28,368 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35155-0x10197ce54a90003, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-12-02T15:22:28,368 DEBUG [pool-69-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45405-0x10197ce54a90002, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-12-02T15:22:28,368 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34441-0x10197ce54a90000, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-12-02T15:22:28,368 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43025-0x10197ce54a90001, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-12-02T15:22:28,369 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF 2024-12-02T15:22:28,369 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF 2024-12-02T15:22:28,369 DEBUG [zk-permission-watcher-pool-0 {}] 
access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF 2024-12-02T15:22:28,369 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF 2024-12-02T15:22:28,377 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43025-0x10197ce54a90001, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-12-02T15:22:28,377 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35155-0x10197ce54a90003, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-12-02T15:22:28,377 DEBUG [pool-69-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45405-0x10197ce54a90002, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-12-02T15:22:28,377 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34441-0x10197ce54a90000, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-12-02T15:22:28,377 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35155-0x10197ce54a90003, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T15:22:28,377 DEBUG [pool-69-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45405-0x10197ce54a90002, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T15:22:28,377 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43025-0x10197ce54a90001, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T15:22:28,377 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34441-0x10197ce54a90000, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T15:22:28,378 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-02T15:22:28,378 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-02T15:22:28,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=88 2024-12-02T15:22:28,378 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-02T15:22:28,378 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache 
from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-02T15:22:28,378 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testExportWithResetTtl 2024-12-02T15:22:28,378 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testExportWithResetTtl completed 2024-12-02T15:22:28,379 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testExportWithResetTtl 2024-12-02T15:22:28,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] procedure2.ProcedureExecutor(1139): Stored pid=89, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportWithResetTtl 2024-12-02T15:22:28,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=89 2024-12-02T15:22:28,384 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733152948383"}]},"ts":"1733152948383"} 2024-12-02T15:22:28,386 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithResetTtl, state=DISABLING in hbase:meta 2024-12-02T15:22:28,386 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(284): Set testtb-testExportWithResetTtl to state=DISABLING 2024-12-02T15:22:28,387 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=90, ppid=89, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportWithResetTtl}] 2024-12-02T15:22:28,388 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=91, ppid=90, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=04be54dd4f37edf3659f725503c71f38, UNASSIGN}, {pid=92, ppid=90, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=552215209d1542254b303f0cef0c7638, UNASSIGN}] 2024-12-02T15:22:28,389 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=92, ppid=90, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=552215209d1542254b303f0cef0c7638, UNASSIGN 2024-12-02T15:22:28,389 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=91, ppid=90, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=04be54dd4f37edf3659f725503c71f38, UNASSIGN 2024-12-02T15:22:28,390 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=92 updating hbase:meta row=552215209d1542254b303f0cef0c7638, regionState=CLOSING, regionLocation=be0458f36726,35155,1733152773848 2024-12-02T15:22:28,390 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=91 updating hbase:meta row=04be54dd4f37edf3659f725503c71f38, regionState=CLOSING, regionLocation=be0458f36726,43025,1733152773677 2024-12-02T15:22:28,391 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up 
procedure pid=92, ppid=90, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=552215209d1542254b303f0cef0c7638, UNASSIGN because future has completed 2024-12-02T15:22:28,392 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-02T15:22:28,392 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=93, ppid=92, state=RUNNABLE, hasLock=false; CloseRegionProcedure 552215209d1542254b303f0cef0c7638, server=be0458f36726,35155,1733152773848}] 2024-12-02T15:22:28,392 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=91, ppid=90, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=04be54dd4f37edf3659f725503c71f38, UNASSIGN because future has completed 2024-12-02T15:22:28,393 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-02T15:22:28,393 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=94, ppid=91, state=RUNNABLE, hasLock=false; CloseRegionProcedure 04be54dd4f37edf3659f725503c71f38, server=be0458f36726,43025,1733152773677}] 2024-12-02T15:22:28,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=89 2024-12-02T15:22:28,545 INFO [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=93}] handler.UnassignRegionHandler(122): Close 552215209d1542254b303f0cef0c7638 2024-12-02T15:22:28,545 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=93}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-02T15:22:28,546 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=93}] regionserver.HRegion(1722): Closing 552215209d1542254b303f0cef0c7638, disabling compactions & flushes 2024-12-02T15:22:28,546 INFO [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=93}] regionserver.HRegion(1755): Closing region testtb-testExportWithResetTtl,1,1733152927282.552215209d1542254b303f0cef0c7638. 2024-12-02T15:22:28,546 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=93}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithResetTtl,1,1733152927282.552215209d1542254b303f0cef0c7638. 2024-12-02T15:22:28,546 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=93}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithResetTtl,1,1733152927282.552215209d1542254b303f0cef0c7638. after waiting 0 ms 2024-12-02T15:22:28,546 INFO [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] handler.UnassignRegionHandler(122): Close 04be54dd4f37edf3659f725503c71f38 2024-12-02T15:22:28,546 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=93}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithResetTtl,1,1733152927282.552215209d1542254b303f0cef0c7638. 
2024-12-02T15:22:28,546 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-02T15:22:28,547 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(1722): Closing 04be54dd4f37edf3659f725503c71f38, disabling compactions & flushes 2024-12-02T15:22:28,547 INFO [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(1755): Closing region testtb-testExportWithResetTtl,,1733152927282.04be54dd4f37edf3659f725503c71f38. 2024-12-02T15:22:28,547 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithResetTtl,,1733152927282.04be54dd4f37edf3659f725503c71f38. 2024-12-02T15:22:28,547 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithResetTtl,,1733152927282.04be54dd4f37edf3659f725503c71f38. after waiting 0 ms 2024-12-02T15:22:28,547 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithResetTtl,,1733152927282.04be54dd4f37edf3659f725503c71f38. 2024-12-02T15:22:28,554 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportWithResetTtl/04be54dd4f37edf3659f725503c71f38/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-02T15:22:28,554 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=93}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportWithResetTtl/552215209d1542254b303f0cef0c7638/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-02T15:22:28,555 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-02T15:22:28,555 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=93}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-02T15:22:28,555 INFO [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(1973): Closed testtb-testExportWithResetTtl,,1733152927282.04be54dd4f37edf3659f725503c71f38. 2024-12-02T15:22:28,555 INFO [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=93}] regionserver.HRegion(1973): Closed testtb-testExportWithResetTtl,1,1733152927282.552215209d1542254b303f0cef0c7638. 
2024-12-02T15:22:28,555 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(1676): Region close journal for 04be54dd4f37edf3659f725503c71f38: Waiting for close lock at 1733152948547Running coprocessor pre-close hooks at 1733152948547Disabling compacts and flushes for region at 1733152948547Disabling writes for close at 1733152948547Writing region close event to WAL at 1733152948548 (+1 ms)Running coprocessor post-close hooks at 1733152948554 (+6 ms)Closed at 1733152948555 (+1 ms) 2024-12-02T15:22:28,555 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=93}] regionserver.HRegion(1676): Region close journal for 552215209d1542254b303f0cef0c7638: Waiting for close lock at 1733152948546Running coprocessor pre-close hooks at 1733152948546Disabling compacts and flushes for region at 1733152948546Disabling writes for close at 1733152948546Writing region close event to WAL at 1733152948548 (+2 ms)Running coprocessor post-close hooks at 1733152948555 (+7 ms)Closed at 1733152948555 2024-12-02T15:22:28,557 INFO [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=93}] handler.UnassignRegionHandler(157): Closed 552215209d1542254b303f0cef0c7638 2024-12-02T15:22:28,558 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=92 updating hbase:meta row=552215209d1542254b303f0cef0c7638, regionState=CLOSED 2024-12-02T15:22:28,558 INFO [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] handler.UnassignRegionHandler(157): Closed 04be54dd4f37edf3659f725503c71f38 2024-12-02T15:22:28,560 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=93, ppid=92, state=RUNNABLE, hasLock=false; CloseRegionProcedure 552215209d1542254b303f0cef0c7638, server=be0458f36726,35155,1733152773848 because future has completed 2024-12-02T15:22:28,560 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=91 updating hbase:meta row=04be54dd4f37edf3659f725503c71f38, regionState=CLOSED 2024-12-02T15:22:28,561 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=94, ppid=91, state=RUNNABLE, hasLock=false; CloseRegionProcedure 04be54dd4f37edf3659f725503c71f38, server=be0458f36726,43025,1733152773677 because future has completed 2024-12-02T15:22:28,562 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=93, resume processing ppid=92 2024-12-02T15:22:28,563 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=93, ppid=92, state=SUCCESS, hasLock=false; CloseRegionProcedure 552215209d1542254b303f0cef0c7638, server=be0458f36726,35155,1733152773848 in 168 msec 2024-12-02T15:22:28,564 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=92, ppid=90, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=552215209d1542254b303f0cef0c7638, UNASSIGN in 175 msec 2024-12-02T15:22:28,564 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=94, resume processing ppid=91 2024-12-02T15:22:28,564 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=94, ppid=91, state=SUCCESS, hasLock=false; CloseRegionProcedure 04be54dd4f37edf3659f725503c71f38, server=be0458f36726,43025,1733152773677 in 169 msec 2024-12-02T15:22:28,565 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished 
subprocedure pid=91, resume processing ppid=90 2024-12-02T15:22:28,565 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=91, ppid=90, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=04be54dd4f37edf3659f725503c71f38, UNASSIGN in 176 msec 2024-12-02T15:22:28,567 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=90, resume processing ppid=89 2024-12-02T15:22:28,567 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=90, ppid=89, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportWithResetTtl in 179 msec 2024-12-02T15:22:28,568 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733152948568"}]},"ts":"1733152948568"} 2024-12-02T15:22:28,570 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithResetTtl, state=DISABLED in hbase:meta 2024-12-02T15:22:28,570 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(296): Set testtb-testExportWithResetTtl to state=DISABLED 2024-12-02T15:22:28,572 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=89, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportWithResetTtl in 192 msec 2024-12-02T15:22:28,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=89 2024-12-02T15:22:28,704 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testExportWithResetTtl completed 2024-12-02T15:22:28,705 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testExportWithResetTtl 2024-12-02T15:22:28,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] procedure2.ProcedureExecutor(1139): Stored pid=95, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportWithResetTtl 2024-12-02T15:22:28,708 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=95, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportWithResetTtl 2024-12-02T15:22:28,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportWithResetTtl 2024-12-02T15:22:28,710 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=95, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportWithResetTtl 2024-12-02T15:22:28,714 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43025 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportWithResetTtl 2024-12-02T15:22:28,716 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportWithResetTtl/04be54dd4f37edf3659f725503c71f38 2024-12-02T15:22:28,716 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(131): ARCHIVING 
hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportWithResetTtl/552215209d1542254b303f0cef0c7638 2024-12-02T15:22:28,719 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportWithResetTtl/04be54dd4f37edf3659f725503c71f38/cf, FileablePath, hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportWithResetTtl/04be54dd4f37edf3659f725503c71f38/recovered.edits] 2024-12-02T15:22:28,719 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportWithResetTtl/552215209d1542254b303f0cef0c7638/cf, FileablePath, hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportWithResetTtl/552215209d1542254b303f0cef0c7638/recovered.edits] 2024-12-02T15:22:28,727 DEBUG [pool-69-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45405-0x10197ce54a90002, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-12-02T15:22:28,727 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43025-0x10197ce54a90001, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-12-02T15:22:28,727 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34441-0x10197ce54a90000, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-12-02T15:22:28,727 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35155-0x10197ce54a90003, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-12-02T15:22:28,728 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportWithResetTtl/04be54dd4f37edf3659f725503c71f38/cf/8aaa5e3805c042eba3c598788d4cbbc6 to hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/archive/data/default/testtb-testExportWithResetTtl/04be54dd4f37edf3659f725503c71f38/cf/8aaa5e3805c042eba3c598788d4cbbc6 2024-12-02T15:22:28,728 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF 2024-12-02T15:22:28,728 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF 2024-12-02T15:22:28,728 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF 2024-12-02T15:22:28,728 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF 2024-12-02T15:22:28,729 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportWithResetTtl/552215209d1542254b303f0cef0c7638/cf/9d39722304b844b0b3f16b147bef222b to hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/archive/data/default/testtb-testExportWithResetTtl/552215209d1542254b303f0cef0c7638/cf/9d39722304b844b0b3f16b147bef222b 2024-12-02T15:22:28,731 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportWithResetTtl/04be54dd4f37edf3659f725503c71f38/recovered.edits/9.seqid to hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/archive/data/default/testtb-testExportWithResetTtl/04be54dd4f37edf3659f725503c71f38/recovered.edits/9.seqid 2024-12-02T15:22:28,731 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportWithResetTtl/04be54dd4f37edf3659f725503c71f38 2024-12-02T15:22:28,732 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportWithResetTtl/552215209d1542254b303f0cef0c7638/recovered.edits/9.seqid to hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/archive/data/default/testtb-testExportWithResetTtl/552215209d1542254b303f0cef0c7638/recovered.edits/9.seqid 2024-12-02T15:22:28,732 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportWithResetTtl/552215209d1542254b303f0cef0c7638 2024-12-02T15:22:28,732 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportWithResetTtl regions 2024-12-02T15:22:28,735 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=95, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportWithResetTtl 2024-12-02T15:22:28,735 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35155-0x10197ce54a90003, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-12-02T15:22:28,735 DEBUG [pool-69-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45405-0x10197ce54a90002, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-12-02T15:22:28,735 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43025-0x10197ce54a90001, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-12-02T15:22:28,735 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34441-0x10197ce54a90000, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-12-02T15:22:28,735 DEBUG [pool-69-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45405-0x10197ce54a90002, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, 
type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T15:22:28,735 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35155-0x10197ce54a90003, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T15:22:28,735 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43025-0x10197ce54a90001, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T15:22:28,735 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34441-0x10197ce54a90000, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T15:22:28,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=95 2024-12-02T15:22:28,739 WARN [PEWorker-3 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testExportWithResetTtl from hbase:meta 2024-12-02T15:22:28,741 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testExportWithResetTtl' descriptor. 2024-12-02T15:22:28,743 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=95, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportWithResetTtl 2024-12-02T15:22:28,743 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testExportWithResetTtl' from region states. 2024-12-02T15:22:28,743 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportWithResetTtl,,1733152927282.04be54dd4f37edf3659f725503c71f38.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733152948743"}]},"ts":"9223372036854775807"} 2024-12-02T15:22:28,743 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportWithResetTtl,1,1733152927282.552215209d1542254b303f0cef0c7638.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733152948743"}]},"ts":"9223372036854775807"} 2024-12-02T15:22:28,745 INFO [PEWorker-3 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-12-02T15:22:28,746 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => 04be54dd4f37edf3659f725503c71f38, NAME => 'testtb-testExportWithResetTtl,,1733152927282.04be54dd4f37edf3659f725503c71f38.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 552215209d1542254b303f0cef0c7638, NAME => 'testtb-testExportWithResetTtl,1,1733152927282.552215209d1542254b303f0cef0c7638.', STARTKEY => '1', ENDKEY => ''}] 2024-12-02T15:22:28,746 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testExportWithResetTtl' as deleted. 
2024-12-02T15:22:28,746 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733152948746"}]},"ts":"9223372036854775807"} 2024-12-02T15:22:28,748 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportWithResetTtl state from META 2024-12-02T15:22:28,749 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(133): Finished pid=95, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportWithResetTtl 2024-12-02T15:22:28,750 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=95, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportWithResetTtl in 44 msec 2024-12-02T15:22:28,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=95 2024-12-02T15:22:28,844 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportWithResetTtl 2024-12-02T15:22:28,844 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportWithResetTtl completed 2024-12-02T15:22:28,855 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportWithResetTtl" type: DISABLED 2024-12-02T15:22:28,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testExportWithResetTtl 2024-12-02T15:22:28,859 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb-testExportWithResetTtl" type: DISABLED 2024-12-02T15:22:28,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb-testExportWithResetTtl 2024-12-02T15:22:28,863 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportWithResetTtl" type: DISABLED 2024-12-02T15:22:28,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportWithResetTtl 2024-12-02T15:22:28,889 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestSecureExportSnapshot#testExportWithResetTtl Thread=803 (was 796) Potentially hanging thread: process reaper (pid 117946) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:46423 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) 
app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-205930424_22 at /127.0.0.1:45664 [Waiting for operation #7] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LogDeleter #1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1177) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-9 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-8 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool.commonPool-worker-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:43581 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:39823 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-205930424_22 at /127.0.0.1:52878 [Waiting for operation #5] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) 
app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (834363022) connection to localhost/127.0.0.1:43581 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_990490590_1 at /127.0.0.1:52858 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-11 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Thread-2851 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-205930424_22 at /127.0.0.1:48960 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-10 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native 
Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_990490590_1 at /127.0.0.1:48934 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=815 (was 810) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=661 (was 620) - SystemLoadAverage LEAK? 
-, ProcessCount=17 (was 17), AvailableMemoryMB=2037 (was 2175) 2024-12-02T15:22:28,889 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=803 is superior to 500 2024-12-02T15:22:28,903 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestSecureExportSnapshot#testExportFileSystemState Thread=803, OpenFileDescriptor=815, MaxFileDescriptor=1048576, SystemLoadAverage=661, ProcessCount=17, AvailableMemoryMB=2037 2024-12-02T15:22:28,903 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=803 is superior to 500 2024-12-02T15:22:28,905 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testExportFileSystemState', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-02T15:22:28,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] procedure2.ProcedureExecutor(1139): Stored pid=96, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemState 2024-12-02T15:22:28,906 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=96, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemState execute state=CREATE_TABLE_PRE_OPERATION 2024-12-02T15:22:28,906 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T15:22:28,906 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportFileSystemState" procId is: 96 2024-12-02T15:22:28,907 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=96, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemState execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-02T15:22:28,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=96 2024-12-02T15:22:28,913 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742018_1194 (size=407) 2024-12-02T15:22:28,914 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742018_1194 (size=407) 2024-12-02T15:22:28,914 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742018_1194 (size=407) 2024-12-02T15:22:28,916 INFO [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 2886f811fadb2f0ee69adeabeb08886c, NAME => 'testtb-testExportFileSystemState,,1733152948904.2886f811fadb2f0ee69adeabeb08886c.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportFileSystemState', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => 
'1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657 2024-12-02T15:22:28,917 INFO [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => 5ca96d90aef87e85184736f1ab25df26, NAME => 'testtb-testExportFileSystemState,1,1733152948904.5ca96d90aef87e85184736f1ab25df26.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportFileSystemState', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657 2024-12-02T15:22:28,926 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742019_1195 (size=68) 2024-12-02T15:22:28,927 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742019_1195 (size=68) 2024-12-02T15:22:28,927 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742019_1195 (size=68) 2024-12-02T15:22:28,928 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemState,,1733152948904.2886f811fadb2f0ee69adeabeb08886c.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T15:22:28,928 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1722): Closing 2886f811fadb2f0ee69adeabeb08886c, disabling compactions & flushes 2024-12-02T15:22:28,928 INFO [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemState,,1733152948904.2886f811fadb2f0ee69adeabeb08886c. 2024-12-02T15:22:28,928 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemState,,1733152948904.2886f811fadb2f0ee69adeabeb08886c. 2024-12-02T15:22:28,928 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemState,,1733152948904.2886f811fadb2f0ee69adeabeb08886c. after waiting 0 ms 2024-12-02T15:22:28,928 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemState,,1733152948904.2886f811fadb2f0ee69adeabeb08886c. 2024-12-02T15:22:28,928 INFO [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemState,,1733152948904.2886f811fadb2f0ee69adeabeb08886c. 
2024-12-02T15:22:28,928 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1676): Region close journal for 2886f811fadb2f0ee69adeabeb08886c: Waiting for close lock at 1733152948928Disabling compacts and flushes for region at 1733152948928Disabling writes for close at 1733152948928Writing region close event to WAL at 1733152948928Closed at 1733152948928 2024-12-02T15:22:28,934 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742020_1196 (size=68) 2024-12-02T15:22:28,934 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742020_1196 (size=68) 2024-12-02T15:22:28,935 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742020_1196 (size=68) 2024-12-02T15:22:28,935 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemState,1,1733152948904.5ca96d90aef87e85184736f1ab25df26.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T15:22:28,935 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1722): Closing 5ca96d90aef87e85184736f1ab25df26, disabling compactions & flushes 2024-12-02T15:22:28,935 INFO [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemState,1,1733152948904.5ca96d90aef87e85184736f1ab25df26. 2024-12-02T15:22:28,935 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemState,1,1733152948904.5ca96d90aef87e85184736f1ab25df26. 2024-12-02T15:22:28,935 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemState,1,1733152948904.5ca96d90aef87e85184736f1ab25df26. after waiting 0 ms 2024-12-02T15:22:28,935 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemState,1,1733152948904.5ca96d90aef87e85184736f1ab25df26. 2024-12-02T15:22:28,935 INFO [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemState,1,1733152948904.5ca96d90aef87e85184736f1ab25df26. 
2024-12-02T15:22:28,935 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1676): Region close journal for 5ca96d90aef87e85184736f1ab25df26: Waiting for close lock at 1733152948935Disabling compacts and flushes for region at 1733152948935Disabling writes for close at 1733152948935Writing region close event to WAL at 1733152948935Closed at 1733152948935 2024-12-02T15:22:28,937 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=96, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemState execute state=CREATE_TABLE_ADD_TO_META 2024-12-02T15:22:28,937 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemState,,1733152948904.2886f811fadb2f0ee69adeabeb08886c.","families":{"info":[{"qualifier":"regioninfo","vlen":67,"tag":[],"timestamp":"1733152948937"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733152948937"}]},"ts":"1733152948937"} 2024-12-02T15:22:28,937 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemState,1,1733152948904.5ca96d90aef87e85184736f1ab25df26.","families":{"info":[{"qualifier":"regioninfo","vlen":67,"tag":[],"timestamp":"1733152948937"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733152948937"}]},"ts":"1733152948937"} 2024-12-02T15:22:28,940 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-12-02T15:22:28,941 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=96, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemState execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-02T15:22:28,941 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733152948941"}]},"ts":"1733152948941"} 2024-12-02T15:22:28,943 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemState, state=ENABLING in hbase:meta 2024-12-02T15:22:28,943 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(204): Hosts are {be0458f36726=0} racks are {/default-rack=0} 2024-12-02T15:22:28,944 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-02T15:22:28,944 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-02T15:22:28,944 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-02T15:22:28,944 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-02T15:22:28,944 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-02T15:22:28,944 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-02T15:22:28,944 INFO [PEWorker-5 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-02T15:22:28,944 INFO [PEWorker-5 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-02T15:22:28,944 INFO [PEWorker-5 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-02T15:22:28,944 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-02T15:22:28,945 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=97, ppid=96, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, 
hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=2886f811fadb2f0ee69adeabeb08886c, ASSIGN}, {pid=98, ppid=96, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=5ca96d90aef87e85184736f1ab25df26, ASSIGN}] 2024-12-02T15:22:28,946 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=98, ppid=96, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=5ca96d90aef87e85184736f1ab25df26, ASSIGN 2024-12-02T15:22:28,946 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=97, ppid=96, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=2886f811fadb2f0ee69adeabeb08886c, ASSIGN 2024-12-02T15:22:28,947 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=97, ppid=96, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=2886f811fadb2f0ee69adeabeb08886c, ASSIGN; state=OFFLINE, location=be0458f36726,35155,1733152773848; forceNewPlan=false, retain=false 2024-12-02T15:22:28,947 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(269): Starting pid=98, ppid=96, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=5ca96d90aef87e85184736f1ab25df26, ASSIGN; state=OFFLINE, location=be0458f36726,45405,1733152773787; forceNewPlan=false, retain=false 2024-12-02T15:22:29,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=96 2024-12-02T15:22:29,097 INFO [be0458f36726:34441 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
2024-12-02T15:22:29,098 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=98 updating hbase:meta row=5ca96d90aef87e85184736f1ab25df26, regionState=OPENING, regionLocation=be0458f36726,45405,1733152773787 2024-12-02T15:22:29,098 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=97 updating hbase:meta row=2886f811fadb2f0ee69adeabeb08886c, regionState=OPENING, regionLocation=be0458f36726,35155,1733152773848 2024-12-02T15:22:29,102 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=98, ppid=96, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=5ca96d90aef87e85184736f1ab25df26, ASSIGN because future has completed 2024-12-02T15:22:29,102 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=99, ppid=98, state=RUNNABLE, hasLock=false; OpenRegionProcedure 5ca96d90aef87e85184736f1ab25df26, server=be0458f36726,45405,1733152773787}] 2024-12-02T15:22:29,104 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=97, ppid=96, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=2886f811fadb2f0ee69adeabeb08886c, ASSIGN because future has completed 2024-12-02T15:22:29,105 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=100, ppid=97, state=RUNNABLE, hasLock=false; OpenRegionProcedure 2886f811fadb2f0ee69adeabeb08886c, server=be0458f36726,35155,1733152773848}] 2024-12-02T15:22:29,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=96 2024-12-02T15:22:29,260 INFO [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemState,1,1733152948904.5ca96d90aef87e85184736f1ab25df26. 2024-12-02T15:22:29,261 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(7752): Opening region: {ENCODED => 5ca96d90aef87e85184736f1ab25df26, NAME => 'testtb-testExportFileSystemState,1,1733152948904.5ca96d90aef87e85184736f1ab25df26.', STARTKEY => '1', ENDKEY => ''} 2024-12-02T15:22:29,261 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemState,1,1733152948904.5ca96d90aef87e85184736f1ab25df26. service=AccessControlService 2024-12-02T15:22:29,261 INFO [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-02T15:22:29,262 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemState 5ca96d90aef87e85184736f1ab25df26 2024-12-02T15:22:29,262 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemState,1,1733152948904.5ca96d90aef87e85184736f1ab25df26.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T15:22:29,262 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(7794): checking encryption for 5ca96d90aef87e85184736f1ab25df26 2024-12-02T15:22:29,262 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(7797): checking classloading for 5ca96d90aef87e85184736f1ab25df26 2024-12-02T15:22:29,262 INFO [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemState,,1733152948904.2886f811fadb2f0ee69adeabeb08886c. 2024-12-02T15:22:29,262 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(7752): Opening region: {ENCODED => 2886f811fadb2f0ee69adeabeb08886c, NAME => 'testtb-testExportFileSystemState,,1733152948904.2886f811fadb2f0ee69adeabeb08886c.', STARTKEY => '', ENDKEY => '1'} 2024-12-02T15:22:29,263 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemState,,1733152948904.2886f811fadb2f0ee69adeabeb08886c. service=AccessControlService 2024-12-02T15:22:29,263 INFO [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
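
Note: the "Registered coprocessor service ... service=AccessControlService" and "System coprocessor ... AccessController loaded" entries above indicate the AccessController coprocessor is active on the opening regions. A hedged sketch of the configuration that enables it follows, set programmatically here for illustration; in a real deployment these properties normally live in hbase-site.xml.

    // Sketch only: configuration that loads the AccessController coprocessor.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class AccessControllerConfig {
      public static Configuration create() {
        Configuration conf = HBaseConfiguration.create();
        conf.setBoolean("hbase.security.authorization", true);
        conf.set("hbase.coprocessor.master.classes",
            "org.apache.hadoop.hbase.security.access.AccessController");
        conf.set("hbase.coprocessor.region.classes",
            "org.apache.hadoop.hbase.security.access.AccessController");
        return conf;
      }
    }
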
2024-12-02T15:22:29,263 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemState 2886f811fadb2f0ee69adeabeb08886c 2024-12-02T15:22:29,263 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemState,,1733152948904.2886f811fadb2f0ee69adeabeb08886c.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T15:22:29,263 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(7794): checking encryption for 2886f811fadb2f0ee69adeabeb08886c 2024-12-02T15:22:29,263 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(7797): checking classloading for 2886f811fadb2f0ee69adeabeb08886c 2024-12-02T15:22:29,264 INFO [StoreOpener-5ca96d90aef87e85184736f1ab25df26-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 5ca96d90aef87e85184736f1ab25df26 2024-12-02T15:22:29,265 INFO [StoreOpener-2886f811fadb2f0ee69adeabeb08886c-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 2886f811fadb2f0ee69adeabeb08886c 2024-12-02T15:22:29,266 INFO [StoreOpener-5ca96d90aef87e85184736f1ab25df26-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 5ca96d90aef87e85184736f1ab25df26 columnFamilyName cf 2024-12-02T15:22:29,266 DEBUG [StoreOpener-5ca96d90aef87e85184736f1ab25df26-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T15:22:29,266 INFO [StoreOpener-2886f811fadb2f0ee69adeabeb08886c-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 
2886f811fadb2f0ee69adeabeb08886c columnFamilyName cf 2024-12-02T15:22:29,266 INFO [StoreOpener-5ca96d90aef87e85184736f1ab25df26-1 {}] regionserver.HStore(327): Store=5ca96d90aef87e85184736f1ab25df26/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T15:22:29,266 DEBUG [StoreOpener-2886f811fadb2f0ee69adeabeb08886c-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T15:22:29,267 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(1038): replaying wal for 5ca96d90aef87e85184736f1ab25df26 2024-12-02T15:22:29,267 INFO [StoreOpener-2886f811fadb2f0ee69adeabeb08886c-1 {}] regionserver.HStore(327): Store=2886f811fadb2f0ee69adeabeb08886c/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T15:22:29,267 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(1038): replaying wal for 2886f811fadb2f0ee69adeabeb08886c 2024-12-02T15:22:29,267 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportFileSystemState/5ca96d90aef87e85184736f1ab25df26 2024-12-02T15:22:29,268 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportFileSystemState/5ca96d90aef87e85184736f1ab25df26 2024-12-02T15:22:29,268 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportFileSystemState/2886f811fadb2f0ee69adeabeb08886c 2024-12-02T15:22:29,268 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(1048): stopping wal replay for 5ca96d90aef87e85184736f1ab25df26 2024-12-02T15:22:29,269 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(1060): Cleaning up temporary data for 5ca96d90aef87e85184736f1ab25df26 2024-12-02T15:22:29,269 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportFileSystemState/2886f811fadb2f0ee69adeabeb08886c 2024-12-02T15:22:29,269 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(1048): stopping wal replay for 2886f811fadb2f0ee69adeabeb08886c 2024-12-02T15:22:29,269 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(1060): Cleaning up temporary data for 2886f811fadb2f0ee69adeabeb08886c 2024-12-02T15:22:29,271 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 
{event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(1093): writing seq id for 5ca96d90aef87e85184736f1ab25df26 2024-12-02T15:22:29,272 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(1093): writing seq id for 2886f811fadb2f0ee69adeabeb08886c 2024-12-02T15:22:29,280 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportFileSystemState/5ca96d90aef87e85184736f1ab25df26/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-02T15:22:29,280 INFO [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(1114): Opened 5ca96d90aef87e85184736f1ab25df26; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=70014573, jitterRate=0.04329843819141388}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-02T15:22:29,281 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 5ca96d90aef87e85184736f1ab25df26 2024-12-02T15:22:29,281 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportFileSystemState/2886f811fadb2f0ee69adeabeb08886c/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-02T15:22:29,281 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(1006): Region open journal for 5ca96d90aef87e85184736f1ab25df26: Running coprocessor pre-open hook at 1733152949262Writing region info on filesystem at 1733152949262Initializing all the Stores at 1733152949263 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733152949263Cleaning up temporary data from old regions at 1733152949269 (+6 ms)Running coprocessor post-open hooks at 1733152949281 (+12 ms)Region opened successfully at 1733152949281 2024-12-02T15:22:29,281 INFO [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(1114): Opened 2886f811fadb2f0ee69adeabeb08886c; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=69211639, jitterRate=0.031333789229393005}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-02T15:22:29,281 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 2886f811fadb2f0ee69adeabeb08886c 2024-12-02T15:22:29,281 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(1006): Region open journal for 2886f811fadb2f0ee69adeabeb08886c: Running coprocessor pre-open hook at 1733152949263Writing region info on filesystem at 1733152949263Initializing all the 
Stores at 1733152949264 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733152949264Cleaning up temporary data from old regions at 1733152949269 (+5 ms)Running coprocessor post-open hooks at 1733152949281 (+12 ms)Region opened successfully at 1733152949281 2024-12-02T15:22:29,282 INFO [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemState,1,1733152948904.5ca96d90aef87e85184736f1ab25df26., pid=99, masterSystemTime=1733152949257 2024-12-02T15:22:29,282 INFO [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemState,,1733152948904.2886f811fadb2f0ee69adeabeb08886c., pid=100, masterSystemTime=1733152949259 2024-12-02T15:22:29,284 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemState,,1733152948904.2886f811fadb2f0ee69adeabeb08886c. 2024-12-02T15:22:29,284 INFO [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemState,,1733152948904.2886f811fadb2f0ee69adeabeb08886c. 2024-12-02T15:22:29,284 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=97 updating hbase:meta row=2886f811fadb2f0ee69adeabeb08886c, regionState=OPEN, openSeqNum=2, regionLocation=be0458f36726,35155,1733152773848 2024-12-02T15:22:29,284 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemState,1,1733152948904.5ca96d90aef87e85184736f1ab25df26. 2024-12-02T15:22:29,284 INFO [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemState,1,1733152948904.5ca96d90aef87e85184736f1ab25df26. 
2024-12-02T15:22:29,285 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=98 updating hbase:meta row=5ca96d90aef87e85184736f1ab25df26, regionState=OPEN, openSeqNum=2, regionLocation=be0458f36726,45405,1733152773787 2024-12-02T15:22:29,286 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=100, ppid=97, state=RUNNABLE, hasLock=false; OpenRegionProcedure 2886f811fadb2f0ee69adeabeb08886c, server=be0458f36726,35155,1733152773848 because future has completed 2024-12-02T15:22:29,287 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=99, ppid=98, state=RUNNABLE, hasLock=false; OpenRegionProcedure 5ca96d90aef87e85184736f1ab25df26, server=be0458f36726,45405,1733152773787 because future has completed 2024-12-02T15:22:29,288 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=100, resume processing ppid=97 2024-12-02T15:22:29,288 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=100, ppid=97, state=SUCCESS, hasLock=false; OpenRegionProcedure 2886f811fadb2f0ee69adeabeb08886c, server=be0458f36726,35155,1733152773848 in 182 msec 2024-12-02T15:22:29,289 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=99, resume processing ppid=98 2024-12-02T15:22:29,289 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=99, ppid=98, state=SUCCESS, hasLock=false; OpenRegionProcedure 5ca96d90aef87e85184736f1ab25df26, server=be0458f36726,45405,1733152773787 in 186 msec 2024-12-02T15:22:29,290 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=97, ppid=96, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=2886f811fadb2f0ee69adeabeb08886c, ASSIGN in 343 msec 2024-12-02T15:22:29,291 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=98, resume processing ppid=96 2024-12-02T15:22:29,291 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=98, ppid=96, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=5ca96d90aef87e85184736f1ab25df26, ASSIGN in 344 msec 2024-12-02T15:22:29,291 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=96, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemState execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-02T15:22:29,292 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733152949292"}]},"ts":"1733152949292"} 2024-12-02T15:22:29,293 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemState, state=ENABLED in hbase:meta 2024-12-02T15:22:29,294 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=96, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemState execute state=CREATE_TABLE_POST_OPERATION 2024-12-02T15:22:29,294 DEBUG [PEWorker-5 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportFileSystemState jenkins: RWXCA 2024-12-02T15:22:29,297 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43025 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemState], kv [jenkins: RWXCA] 
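
Note: the PermissionStorage entries above record an RWXCA acl entry for user 'jenkins' on the new table, written during CREATE_TABLE_POST_OPERATION. An explicit grant of the same permissions through the client API would look roughly like the sketch below; the user name and actions come from the log, but the call itself is only illustrative, since in this run the coprocessor writes the entry automatically.

    // Illustrative only: an explicit grant equivalent to the acl entry in the log
    // (user 'jenkins', actions R W X C A on the whole table).
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.security.access.AccessControlClient;
    import org.apache.hadoop.hbase.security.access.Permission;

    public class GrantSketch {
      public static void grantAll(Connection conn) throws Throwable {
        AccessControlClient.grant(conn,
            TableName.valueOf("testtb-testExportFileSystemState"),
            "jenkins", null, null,                     // all families, all qualifiers
            Permission.Action.READ, Permission.Action.WRITE, Permission.Action.EXEC,
            Permission.Action.CREATE, Permission.Action.ADMIN);
      }
    }
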
2024-12-02T15:22:29,318 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34441-0x10197ce54a90000, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T15:22:29,318 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35155-0x10197ce54a90003, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T15:22:29,318 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43025-0x10197ce54a90001, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T15:22:29,318 DEBUG [pool-69-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45405-0x10197ce54a90002, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T15:22:29,327 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-12-02T15:22:29,327 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-12-02T15:22:29,327 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-12-02T15:22:29,327 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-12-02T15:22:29,329 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=96, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemState in 422 msec 2024-12-02T15:22:29,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=96 2024-12-02T15:22:29,533 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportFileSystemState completed 2024-12-02T15:22:29,533 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table testtb-testExportFileSystemState get assigned. Timeout = 60000ms 2024-12-02T15:22:29,533 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-02T15:22:29,536 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table testtb-testExportFileSystemState assigned to meta. Checking AM states. 2024-12-02T15:22:29,536 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-02T15:22:29,536 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table testtb-testExportFileSystemState assigned. 
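
Note: the HBaseTestingUtil entries above show the test blocking until every region of the table is assigned, with a 60000 ms timeout. A hedged sketch of that wait follows; the HBaseTestingUtil instance is assumed to come from the surrounding test harness.

    // Sketch only: block until all regions of the table are assigned,
    // with the 60 s timeout reported in the log.
    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.apache.hadoop.hbase.TableName;

    public class AssignmentWaitSketch {
      public static void waitForAssignment(HBaseTestingUtil util) throws Exception {
        util.waitUntilAllRegionsAssigned(
            TableName.valueOf("testtb-testExportFileSystemState"), 60_000);
      }
    }
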
2024-12-02T15:22:29,536 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemState,, stopping at row=testtb-testExportFileSystemState ,, for max=2147483647 with caching=100 2024-12-02T15:22:29,539 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } 2024-12-02T15:22:29,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733152949539 (current time:1733152949539). 2024-12-02T15:22:29,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-02T15:22:29,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testExportFileSystemState VERSION not specified, setting to 2 2024-12-02T15:22:29,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-02T15:22:29,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7e375855, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T15:22:29,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] client.ClusterIdFetcher(90): Going to request be0458f36726,34441,-1 for getting cluster id 2024-12-02T15:22:29,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-02T15:22:29,540 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '8e50677f-f1c7-447a-8806-ffbc6b162076' 2024-12-02T15:22:29,541 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-02T15:22:29,541 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "8e50677f-f1c7-447a-8806-ffbc6b162076" 2024-12-02T15:22:29,541 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@14baf177, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T15:22:29,541 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [be0458f36726,34441,-1] 2024-12-02T15:22:29,541 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-02T15:22:29,541 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T15:22:29,542 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): 
Connection from 172.17.0.2:54472, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-02T15:22:29,543 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6a61200c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T15:22:29,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-02T15:22:29,544 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=be0458f36726,45405,1733152773787, seqNum=-1] 2024-12-02T15:22:29,544 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T15:22:29,545 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33434, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T15:22:29,546 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441. 2024-12-02T15:22:29,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-02T15:22:29,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T15:22:29,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T15:22:29,547 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-12-02T15:22:29,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@36e520ee, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T15:22:29,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] client.ClusterIdFetcher(90): Going to request be0458f36726,34441,-1 for getting cluster id 2024-12-02T15:22:29,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-02T15:22:29,548 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '8e50677f-f1c7-447a-8806-ffbc6b162076' 2024-12-02T15:22:29,548 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-02T15:22:29,548 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "8e50677f-f1c7-447a-8806-ffbc6b162076" 2024-12-02T15:22:29,549 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@33020f49, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T15:22:29,549 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [be0458f36726,34441,-1] 2024-12-02T15:22:29,549 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-02T15:22:29,549 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T15:22:29,550 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54488, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-02T15:22:29,550 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@49b76028, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T15:22:29,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-02T15:22:29,552 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=be0458f36726,45405,1733152773787, seqNum=-1] 2024-12-02T15:22:29,552 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T15:22:29,553 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33444, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
2024-12-02T15:22:29,555 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemState', locateType=CURRENT is [region=hbase:acl,,1733152776657.82c95700d9ff2efea2507bd84223c4b0., hostname=be0458f36726,43025,1733152773677, seqNum=2] 2024-12-02T15:22:29,555 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T15:22:29,556 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36840, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T15:22:29,557 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441. 2024-12-02T15:22:29,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor255.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-02T15:22:29,557 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T15:22:29,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T15:22:29,557 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-02T15:22:29,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemState], kv [jenkins: RWXCA] 2024-12-02T15:22:29,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-12-02T15:22:29,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] procedure2.ProcedureExecutor(1139): Stored pid=101, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=101, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } 2024-12-02T15:22:29,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 101 2024-12-02T15:22:29,560 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=101, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=101, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-02T15:22:29,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=101 2024-12-02T15:22:29,561 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=101, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=101, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-02T15:22:29,563 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=101, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=101, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-02T15:22:29,569 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742021_1197 (size=170) 2024-12-02T15:22:29,569 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742021_1197 (size=170) 2024-12-02T15:22:29,570 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742021_1197 (size=170) 2024-12-02T15:22:29,571 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=101, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=101, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 
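
Note: SnapshotProcedure pid=101 above walks emptySnaptb0-testExportFileSystemState through SNAPSHOT_PREPARE, SNAPSHOT_PRE_OPERATION, SNAPSHOT_WRITE_SNAPSHOT_INFO and SNAPSHOT_SNAPSHOT_ONLINE_REGIONS. On the client side the whole procedure is typically driven by a single admin call, sketched below; the snapshot and table names are from the log, the Admin instance is assumed.

    // Sketch only: request a FLUSH-type snapshot like emptySnaptb0-... above.
    // FLUSH is the default snapshot type for this overload.
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;

    public class SnapshotSketch {
      public static void takeSnapshot(Admin admin) throws Exception {
        admin.snapshot("emptySnaptb0-testExportFileSystemState",
            TableName.valueOf("testtb-testExportFileSystemState"));
      }
    }
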
2024-12-02T15:22:29,571 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=102, ppid=101, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 2886f811fadb2f0ee69adeabeb08886c}, {pid=103, ppid=101, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 5ca96d90aef87e85184736f1ab25df26}] 2024-12-02T15:22:29,572 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=103, ppid=101, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 5ca96d90aef87e85184736f1ab25df26 2024-12-02T15:22:29,572 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=102, ppid=101, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 2886f811fadb2f0ee69adeabeb08886c 2024-12-02T15:22:29,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=101 2024-12-02T15:22:29,724 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45405 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=103 2024-12-02T15:22:29,724 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemState,1,1733152948904.5ca96d90aef87e85184736f1ab25df26. 2024-12-02T15:22:29,724 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35155 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=102 2024-12-02T15:22:29,724 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=102}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemState,,1733152948904.2886f811fadb2f0ee69adeabeb08886c. 2024-12-02T15:22:29,724 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] regionserver.HRegion(2603): Flush status journal for 5ca96d90aef87e85184736f1ab25df26: 2024-12-02T15:22:29,724 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemState,1,1733152948904.5ca96d90aef87e85184736f1ab25df26. for emptySnaptb0-testExportFileSystemState completed. 2024-12-02T15:22:29,724 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=102}] regionserver.HRegion(2603): Flush status journal for 2886f811fadb2f0ee69adeabeb08886c: 2024-12-02T15:22:29,724 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=102}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemState,,1733152948904.2886f811fadb2f0ee69adeabeb08886c. for emptySnaptb0-testExportFileSystemState completed. 2024-12-02T15:22:29,724 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=102}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemState,,1733152948904.2886f811fadb2f0ee69adeabeb08886c.' 
region-info for snapshot=emptySnaptb0-testExportFileSystemState 2024-12-02T15:22:29,724 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemState,1,1733152948904.5ca96d90aef87e85184736f1ab25df26.' region-info for snapshot=emptySnaptb0-testExportFileSystemState 2024-12-02T15:22:29,724 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=102}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-02T15:22:29,724 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-02T15:22:29,724 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=102}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-02T15:22:29,724 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-02T15:22:29,732 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742023_1199 (size=71) 2024-12-02T15:22:29,732 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742023_1199 (size=71) 2024-12-02T15:22:29,732 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742022_1198 (size=71) 2024-12-02T15:22:29,732 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742023_1199 (size=71) 2024-12-02T15:22:29,732 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742022_1198 (size=71) 2024-12-02T15:22:29,732 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742022_1198 (size=71) 2024-12-02T15:22:29,733 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=102}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemState,,1733152948904.2886f811fadb2f0ee69adeabeb08886c. 2024-12-02T15:22:29,733 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=102}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=102 2024-12-02T15:22:29,733 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemState,1,1733152948904.5ca96d90aef87e85184736f1ab25df26. 
2024-12-02T15:22:29,733 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=103 2024-12-02T15:22:29,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] master.HMaster(4169): Remote procedure done, pid=102 2024-12-02T15:22:29,733 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemState on region 2886f811fadb2f0ee69adeabeb08886c 2024-12-02T15:22:29,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.HMaster(4169): Remote procedure done, pid=103 2024-12-02T15:22:29,733 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemState on region 5ca96d90aef87e85184736f1ab25df26 2024-12-02T15:22:29,733 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=102, ppid=101, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 2886f811fadb2f0ee69adeabeb08886c 2024-12-02T15:22:29,734 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=103, ppid=101, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 5ca96d90aef87e85184736f1ab25df26 2024-12-02T15:22:29,735 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=103, ppid=101, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 5ca96d90aef87e85184736f1ab25df26 in 163 msec 2024-12-02T15:22:29,736 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=102, resume processing ppid=101 2024-12-02T15:22:29,736 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=102, ppid=101, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 2886f811fadb2f0ee69adeabeb08886c in 163 msec 2024-12-02T15:22:29,736 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=101, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=101, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-02T15:22:29,737 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=101, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=101, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-02T15:22:29,737 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=101, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=101, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-02T15:22:29,737 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportFileSystemState 2024-12-02T15:22:29,738 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemState 2024-12-02T15:22:29,750 INFO [Block report processor {}] blockmanagement.BlockManager(3777): 
BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742024_1200 (size=552) 2024-12-02T15:22:29,750 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742024_1200 (size=552) 2024-12-02T15:22:29,750 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742024_1200 (size=552) 2024-12-02T15:22:29,752 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=101, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=101, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-02T15:22:29,757 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=101, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=101, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-02T15:22:29,758 DEBUG [PEWorker-1 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemState to hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/.hbase-snapshot/emptySnaptb0-testExportFileSystemState 2024-12-02T15:22:29,759 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=101, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=101, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-02T15:22:29,759 DEBUG [PEWorker-1 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 101 2024-12-02T15:22:29,760 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=101, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=101, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } in 201 msec 2024-12-02T15:22:29,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=101 2024-12-02T15:22:29,874 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemState completed 2024-12-02T15:22:29,877 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemState', row='0adf57c3d5ce676e35fec82fc85018924', locateType=CURRENT is [region=testtb-testExportFileSystemState,,1733152948904.2886f811fadb2f0ee69adeabeb08886c., hostname=be0458f36726,35155,1733152773848, seqNum=2] 2024-12-02T15:22:29,878 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemState', row='111d953aa1b1d5b42dfeb99bc9eeb0b1a', locateType=CURRENT is 
[region=testtb-testExportFileSystemState,1,1733152948904.5ca96d90aef87e85184736f1ab25df26., hostname=be0458f36726,45405,1733152773787, seqNum=2] 2024-12-02T15:22:29,879 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemState', row='23dc0f5b222c6238d317ea58922011d6f', locateType=CURRENT is [region=testtb-testExportFileSystemState,1,1733152948904.5ca96d90aef87e85184736f1ab25df26., hostname=be0458f36726,45405,1733152773787, seqNum=2] 2024-12-02T15:22:29,880 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemState', row='3a02661a795292d1dd8ee1b66d7a8475a', locateType=CURRENT is [region=testtb-testExportFileSystemState,1,1733152948904.5ca96d90aef87e85184736f1ab25df26., hostname=be0458f36726,45405,1733152773787, seqNum=2] 2024-12-02T15:22:29,881 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemState', row='43c09552e15d0b32b67199c34cd49cefe', locateType=CURRENT is [region=testtb-testExportFileSystemState,1,1733152948904.5ca96d90aef87e85184736f1ab25df26., hostname=be0458f36726,45405,1733152773787, seqNum=2] 2024-12-02T15:22:29,881 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemState', row='546b3b904496e9581520dd9978774a156', locateType=CURRENT is [region=testtb-testExportFileSystemState,1,1733152948904.5ca96d90aef87e85184736f1ab25df26., hostname=be0458f36726,45405,1733152773787, seqNum=2] 2024-12-02T15:22:29,883 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35155 {}] regionserver.HRegion(8528): writing data to region testtb-testExportFileSystemState,,1733152948904.2886f811fadb2f0ee69adeabeb08886c. with WAL disabled. Data may be lost in the event of a crash. 2024-12-02T15:22:29,884 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45405 {}] regionserver.HRegion(8528): writing data to region testtb-testExportFileSystemState,1,1733152948904.5ca96d90aef87e85184736f1ab25df26. with WAL disabled. Data may be lost in the event of a crash. 2024-12-02T15:22:29,885 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemState,, stopping at row=testtb-testExportFileSystemState ,, for max=2147483647 with caching=100 2024-12-02T15:22:29,888 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportFileSystemState 2024-12-02T15:22:29,888 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportFileSystemState,,1733152948904.2886f811fadb2f0ee69adeabeb08886c. 
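
Note: the HRegion(8528) warnings above ("writing data to region ... with WAL disabled. Data may be lost in the event of a crash.") are the server-side trace of puts issued with SKIP_WAL durability. A hedged sketch of such a write follows; the row key, qualifier and value are hypothetical, only the table name and column family 'cf' come from the log.

    // Sketch only: a put issued with SKIP_WAL durability, which produces the
    // "with WAL disabled" / "Data may be lost" warning seen above.
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.*;
    import org.apache.hadoop.hbase.util.Bytes;

    public class SkipWalPutSketch {
      public static void writeRow(Connection conn) throws Exception {
        try (Table table = conn.getTable(
            TableName.valueOf("testtb-testExportFileSystemState"))) {
          Put put = new Put(Bytes.toBytes("row-1"))                      // hypothetical row
              .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("v"))
              .setDurability(Durability.SKIP_WAL);                       // skip the write-ahead log
          table.put(put);
        }
      }
    }
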
2024-12-02T15:22:29,888 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-02T15:22:29,890 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemState,, stopping at row=testtb-testExportFileSystemState ,, for max=2147483647 with caching=100 2024-12-02T15:22:29,894 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemState,, stopping at row=testtb-testExportFileSystemState ,, for max=2147483647 with caching=100 2024-12-02T15:22:29,898 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemState,, stopping at row=testtb-testExportFileSystemState ,, for max=2147483647 with caching=100 2024-12-02T15:22:29,900 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } 2024-12-02T15:22:29,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733152949900 (current time:1733152949900). 2024-12-02T15:22:29,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-02T15:22:29,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportFileSystemState VERSION not specified, setting to 2 2024-12-02T15:22:29,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-02T15:22:29,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@634ba08f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T15:22:29,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] client.ClusterIdFetcher(90): Going to request be0458f36726,34441,-1 for getting cluster id 2024-12-02T15:22:29,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-02T15:22:29,902 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '8e50677f-f1c7-447a-8806-ffbc6b162076' 2024-12-02T15:22:29,902 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-02T15:22:29,902 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "8e50677f-f1c7-447a-8806-ffbc6b162076" 2024-12-02T15:22:29,902 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@fdff7c6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, 
bind address=null 2024-12-02T15:22:29,902 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [be0458f36726,34441,-1] 2024-12-02T15:22:29,902 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-02T15:22:29,903 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T15:22:29,903 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54512, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-02T15:22:29,903 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2d3e1dca, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T15:22:29,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-02T15:22:29,904 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=be0458f36726,45405,1733152773787, seqNum=-1] 2024-12-02T15:22:29,904 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T15:22:29,905 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33446, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T15:22:29,906 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441. 
2024-12-02T15:22:29,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] client.AsyncConnectionImpl(264): Call stack:
    at java.base/java.lang.Thread.getStackTrace(Thread.java:1619)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229)
    at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134)
    at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481)
    at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353)
    at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767)
    at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java)
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444)
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124)
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102)
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82)
2024-12-02T15:22:29,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-02T15:22:29,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-02T15:22:29,906 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited.
2024-12-02T15:22:29,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@56603e2c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-12-02T15:22:29,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] client.ClusterIdFetcher(90): Going to request be0458f36726,34441,-1 for getting cluster id
2024-12-02T15:22:29,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false
2024-12-02T15:22:29,908 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '8e50677f-f1c7-447a-8806-ffbc6b162076'
2024-12-02T15:22:29,908 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse
2024-12-02T15:22:29,908 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "8e50677f-f1c7-447a-8806-ffbc6b162076"
2024-12-02T15:22:29,908 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4cf3f738, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-12-02T15:22:29,908 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to
use new servers to create stubs: [be0458f36726,34441,-1] 2024-12-02T15:22:29,908 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-02T15:22:29,909 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T15:22:29,909 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54530, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-02T15:22:29,909 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4562dc83, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T15:22:29,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-02T15:22:29,911 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=be0458f36726,45405,1733152773787, seqNum=-1] 2024-12-02T15:22:29,911 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T15:22:29,912 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33462, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T15:22:29,913 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemState', locateType=CURRENT is [region=hbase:acl,,1733152776657.82c95700d9ff2efea2507bd84223c4b0., hostname=be0458f36726,43025,1733152773677, seqNum=2] 2024-12-02T15:22:29,913 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T15:22:29,914 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36854, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T15:22:29,915 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441. 
2024-12-02T15:22:29,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] client.AsyncConnectionImpl(264): Call stack:
    at java.base/java.lang.Thread.getStackTrace(Thread.java:1619)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229)
    at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134)
    at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522)
    at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485)
    at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490)
    at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487)
    at java.base/java.security.AccessController.doPrivileged(AccessController.java:712)
    at java.base/javax.security.auth.Subject.doAs(Subject.java:439)
    at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953)
    at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555)
    at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535)
    at jdk.internal.reflect.GeneratedMethodAccessor255.invoke(Unknown Source)
    at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
    at java.base/java.lang.reflect.Method.invoke(Method.java:568)
    at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39)
    at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174)
    at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487)
    at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354)
    at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767)
    at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java)
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444)
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124)
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102)
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82)
2024-12-02T15:22:29,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-02T15:22:29,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-02T15:22:29,915 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited.
2024-12-02T15:22:29,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemState], kv [jenkins: RWXCA]
2024-12-02T15:22:29,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot...
2024-12-02T15:22:29,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] procedure2.ProcedureExecutor(1139): Stored pid=104, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } 2024-12-02T15:22:29,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 104 2024-12-02T15:22:29,918 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-02T15:22:29,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=104 2024-12-02T15:22:29,918 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-02T15:22:29,921 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-02T15:22:29,926 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742025_1201 (size=165) 2024-12-02T15:22:29,926 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742025_1201 (size=165) 2024-12-02T15:22:29,927 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742025_1201 (size=165) 2024-12-02T15:22:29,928 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-02T15:22:29,929 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=105, ppid=104, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 2886f811fadb2f0ee69adeabeb08886c}, {pid=106, ppid=104, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 5ca96d90aef87e85184736f1ab25df26}] 2024-12-02T15:22:29,929 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=105, ppid=104, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 2886f811fadb2f0ee69adeabeb08886c 2024-12-02T15:22:29,930 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=106, ppid=104, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 5ca96d90aef87e85184736f1ab25df26 2024-12-02T15:22:30,023 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=104 2024-12-02T15:22:30,081 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45405 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=106 2024-12-02T15:22:30,081 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35155 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=105 2024-12-02T15:22:30,081 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemState,1,1733152948904.5ca96d90aef87e85184736f1ab25df26. 2024-12-02T15:22:30,081 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemState,,1733152948904.2886f811fadb2f0ee69adeabeb08886c. 2024-12-02T15:22:30,082 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.HRegion(2902): Flushing 2886f811fadb2f0ee69adeabeb08886c 1/1 column families, dataSize=266 B heapSize=832 B 2024-12-02T15:22:30,082 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.HRegion(2902): Flushing 5ca96d90aef87e85184736f1ab25df26 1/1 column families, dataSize=3.00 KB heapSize=6.72 KB 2024-12-02T15:22:30,095 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportFileSystemState/2886f811fadb2f0ee69adeabeb08886c/.tmp/cf/faf420796b764effa37d40b22a4fd4a7 is 71, key is 0221b882e63f1c941346843f743dffe7/cf:q/1733152949883/Put/seqid=0 2024-12-02T15:22:30,096 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportFileSystemState/5ca96d90aef87e85184736f1ab25df26/.tmp/cf/9de4fe0c5d3d44099d828ad299dd7055 is 71, key is 12d2169faf197cd80dea3fee15542673/cf:q/1733152949884/Put/seqid=0 2024-12-02T15:22:30,101 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742026_1202 (size=5354) 2024-12-02T15:22:30,101 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742026_1202 (size=5354) 2024-12-02T15:22:30,101 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742027_1203 (size=8258) 2024-12-02T15:22:30,102 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742027_1203 (size=8258) 2024-12-02T15:22:30,102 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742026_1202 (size=5354) 2024-12-02T15:22:30,102 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742027_1203 (size=8258) 2024-12-02T15:22:30,104 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=266 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportFileSystemState/2886f811fadb2f0ee69adeabeb08886c/.tmp/cf/faf420796b764effa37d40b22a4fd4a7 2024-12-02T15:22:30,104 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.00 KB at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportFileSystemState/5ca96d90aef87e85184736f1ab25df26/.tmp/cf/9de4fe0c5d3d44099d828ad299dd7055 2024-12-02T15:22:30,110 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportFileSystemState/5ca96d90aef87e85184736f1ab25df26/.tmp/cf/9de4fe0c5d3d44099d828ad299dd7055 as hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportFileSystemState/5ca96d90aef87e85184736f1ab25df26/cf/9de4fe0c5d3d44099d828ad299dd7055 2024-12-02T15:22:30,110 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportFileSystemState/2886f811fadb2f0ee69adeabeb08886c/.tmp/cf/faf420796b764effa37d40b22a4fd4a7 as hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportFileSystemState/2886f811fadb2f0ee69adeabeb08886c/cf/faf420796b764effa37d40b22a4fd4a7 2024-12-02T15:22:30,114 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportFileSystemState/5ca96d90aef87e85184736f1ab25df26/cf/9de4fe0c5d3d44099d828ad299dd7055, entries=46, sequenceid=6, filesize=8.1 K 2024-12-02T15:22:30,114 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportFileSystemState/2886f811fadb2f0ee69adeabeb08886c/cf/faf420796b764effa37d40b22a4fd4a7, entries=4, sequenceid=6, filesize=5.2 K 2024-12-02T15:22:30,115 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.HRegion(3140): Finished flush of dataSize ~266 B/266, heapSize ~816 B/816, currentSize=0 B/0 for 2886f811fadb2f0ee69adeabeb08886c in 33ms, sequenceid=6, compaction requested=false 2024-12-02T15:22:30,115 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.HRegion(3140): Finished flush of dataSize ~3.00 KB/3070, heapSize ~6.70 KB/6864, currentSize=0 B/0 for 
5ca96d90aef87e85184736f1ab25df26 in 33ms, sequenceid=6, compaction requested=false 2024-12-02T15:22:30,115 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportFileSystemState' 2024-12-02T15:22:30,115 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportFileSystemState' 2024-12-02T15:22:30,116 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.HRegion(2603): Flush status journal for 5ca96d90aef87e85184736f1ab25df26: 2024-12-02T15:22:30,116 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.HRegion(2603): Flush status journal for 2886f811fadb2f0ee69adeabeb08886c: 2024-12-02T15:22:30,116 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemState,1,1733152948904.5ca96d90aef87e85184736f1ab25df26. for snaptb0-testExportFileSystemState completed. 2024-12-02T15:22:30,116 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemState,,1733152948904.2886f811fadb2f0ee69adeabeb08886c. for snaptb0-testExportFileSystemState completed. 2024-12-02T15:22:30,116 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemState,1,1733152948904.5ca96d90aef87e85184736f1ab25df26.' region-info for snapshot=snaptb0-testExportFileSystemState 2024-12-02T15:22:30,116 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemState,,1733152948904.2886f811fadb2f0ee69adeabeb08886c.' 
region-info for snapshot=snaptb0-testExportFileSystemState 2024-12-02T15:22:30,116 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-02T15:22:30,116 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-02T15:22:30,116 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportFileSystemState/2886f811fadb2f0ee69adeabeb08886c/cf/faf420796b764effa37d40b22a4fd4a7] hfiles 2024-12-02T15:22:30,116 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportFileSystemState/5ca96d90aef87e85184736f1ab25df26/cf/9de4fe0c5d3d44099d828ad299dd7055] hfiles 2024-12-02T15:22:30,116 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportFileSystemState/2886f811fadb2f0ee69adeabeb08886c/cf/faf420796b764effa37d40b22a4fd4a7 for snapshot=snaptb0-testExportFileSystemState 2024-12-02T15:22:30,116 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportFileSystemState/5ca96d90aef87e85184736f1ab25df26/cf/9de4fe0c5d3d44099d828ad299dd7055 for snapshot=snaptb0-testExportFileSystemState 2024-12-02T15:22:30,133 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742029_1205 (size=110) 2024-12-02T15:22:30,133 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742029_1205 (size=110) 2024-12-02T15:22:30,133 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742029_1205 (size=110) 2024-12-02T15:22:30,134 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemState,,1733152948904.2886f811fadb2f0ee69adeabeb08886c. 
2024-12-02T15:22:30,134 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=105 2024-12-02T15:22:30,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.HMaster(4169): Remote procedure done, pid=105 2024-12-02T15:22:30,134 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemState on region 2886f811fadb2f0ee69adeabeb08886c 2024-12-02T15:22:30,134 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=105, ppid=104, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 2886f811fadb2f0ee69adeabeb08886c 2024-12-02T15:22:30,137 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=105, ppid=104, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 2886f811fadb2f0ee69adeabeb08886c in 206 msec 2024-12-02T15:22:30,140 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742028_1204 (size=110) 2024-12-02T15:22:30,140 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742028_1204 (size=110) 2024-12-02T15:22:30,140 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742028_1204 (size=110) 2024-12-02T15:22:30,141 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemState,1,1733152948904.5ca96d90aef87e85184736f1ab25df26. 
2024-12-02T15:22:30,141 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=106 2024-12-02T15:22:30,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.HMaster(4169): Remote procedure done, pid=106 2024-12-02T15:22:30,141 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemState on region 5ca96d90aef87e85184736f1ab25df26 2024-12-02T15:22:30,141 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=106, ppid=104, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 5ca96d90aef87e85184736f1ab25df26 2024-12-02T15:22:30,143 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=106, resume processing ppid=104 2024-12-02T15:22:30,143 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=106, ppid=104, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 5ca96d90aef87e85184736f1ab25df26 in 213 msec 2024-12-02T15:22:30,143 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-02T15:22:30,144 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-02T15:22:30,144 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-02T15:22:30,145 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportFileSystemState 2024-12-02T15:22:30,145 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemState 2024-12-02T15:22:30,165 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742030_1206 (size=630) 2024-12-02T15:22:30,166 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742030_1206 (size=630) 2024-12-02T15:22:30,166 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742030_1206 (size=630) 2024-12-02T15:22:30,170 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-02T15:22:30,177 INFO 
[PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-02T15:22:30,177 DEBUG [PEWorker-5 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemState to hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/.hbase-snapshot/snaptb0-testExportFileSystemState 2024-12-02T15:22:30,178 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-02T15:22:30,178 DEBUG [PEWorker-5 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 104 2024-12-02T15:22:30,180 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=104, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } in 263 msec 2024-12-02T15:22:30,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=104 2024-12-02T15:22:30,233 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemState completed 2024-12-02T15:22:30,233 INFO [Time-limited test {}] snapshot.TestExportSnapshot(515): HDFS export destination path: hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/export-test/export-1733152950233 2024-12-02T15:22:30,233 INFO [Time-limited test {}] snapshot.TestExportSnapshot(542): tgtFsUri=hdfs://localhost:36329, tgtDir=hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/export-test/export-1733152950233, rawTgtDir=hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/export-test/export-1733152950233, srcFsUri=hdfs://localhost:36329, srcDir=hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657 2024-12-02T15:22:30,259 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1094): inputFs=hdfs://localhost:36329, inputRoot=hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657 2024-12-02T15:22:30,259 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1095): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-205930424_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/export-test/export-1733152950233, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/export-test/export-1733152950233/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemState 2024-12-02T15:22:30,261 INFO [Time-limited test {}] snapshot.ExportSnapshot(1104): 
Verify the source snapshot's expiration status and integrity. 2024-12-02T15:22:30,265 INFO [Time-limited test {}] snapshot.ExportSnapshot(1162): Copy Snapshot Manifest from hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/.hbase-snapshot/snaptb0-testExportFileSystemState to hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/export-test/export-1733152950233/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemState 2024-12-02T15:22:30,280 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742032_1208 (size=165) 2024-12-02T15:22:30,280 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742032_1208 (size=165) 2024-12-02T15:22:30,280 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742032_1208 (size=165) 2024-12-02T15:22:30,284 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742031_1207 (size=630) 2024-12-02T15:22:30,284 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742031_1207 (size=630) 2024-12-02T15:22:30,284 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742031_1207 (size=630) 2024-12-02T15:22:30,286 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-common/target/hbase-common-3.0.0-beta-2-SNAPSHOT.jar 2024-12-02T15:22:30,286 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-protocol-shaded/target/hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar 2024-12-02T15:22:30,286 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-client/target/hbase-client-3.0.0-beta-2-SNAPSHOT.jar 2024-12-02T15:22:30,478 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_126270/yarn-6841854118/MiniMRCluster_126270-localDir-nm-0_1/usercache/jenkins/appcache/application_1733152780087_0003/container_1733152780087_0003_01_000002/launch_container.sh] 2024-12-02T15:22:30,478 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_126270/yarn-6841854118/MiniMRCluster_126270-localDir-nm-0_1/usercache/jenkins/appcache/application_1733152780087_0003/container_1733152780087_0003_01_000002/container_tokens] 2024-12-02T15:22:30,478 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_126270/yarn-6841854118/MiniMRCluster_126270-localDir-nm-0_1/usercache/jenkins/appcache/application_1733152780087_0003/container_1733152780087_0003_01_000002/sysfs] 2024-12-02T15:22:31,191 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/44e720fa-d04f-d823-6564-55fab94a4e60/hadoop-5396858188951729204.jar 2024-12-02T15:22:31,191 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-hadoop-compat/target/hbase-hadoop-compat-3.0.0-beta-2-SNAPSHOT.jar 2024-12-02T15:22:31,192 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-hadoop-compat/target/hbase-hadoop-compat-3.0.0-beta-2-SNAPSHOT.jar 2024-12-02T15:22:31,253 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/44e720fa-d04f-d823-6564-55fab94a4e60/hadoop-15840552164232128404.jar 2024-12-02T15:22:31,254 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-metrics/target/hbase-metrics-3.0.0-beta-2-SNAPSHOT.jar 2024-12-02T15:22:31,254 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-metrics-api/target/hbase-metrics-api-3.0.0-beta-2-SNAPSHOT.jar 2024-12-02T15:22:31,254 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-replication/target/hbase-replication-3.0.0-beta-2-SNAPSHOT.jar 2024-12-02T15:22:31,255 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-http/target/hbase-http-3.0.0-beta-2-SNAPSHOT.jar 2024-12-02T15:22:31,255 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-procedure/target/hbase-procedure-3.0.0-beta-2-SNAPSHOT.jar 2024-12-02T15:22:31,255 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-zookeeper/target/hbase-zookeeper-3.0.0-beta-2-SNAPSHOT.jar 2024-12-02T15:22:31,255 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 
2024-12-02T15:22:31,256 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-02T15:22:31,256 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-02T15:22:31,256 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-02T15:22:31,256 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-02T15:22:31,256 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-02T15:22:31,257 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-02T15:22:31,257 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-02T15:22:31,257 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-02T15:22:31,257 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-02T15:22:31,257 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-02T15:22:31,258 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-02T15:22:31,258 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-02T15:22:31,258 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-02T15:22:31,258 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-02T15:22:31,259 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-02T15:22:31,259 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-02T15:22:31,259 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-02T15:22:31,325 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742033_1209 (size=24020) 2024-12-02T15:22:31,325 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742033_1209 (size=24020) 2024-12-02T15:22:31,325 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742033_1209 (size=24020) 2024-12-02T15:22:31,333 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742034_1210 (size=77755) 2024-12-02T15:22:31,333 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742034_1210 (size=77755) 2024-12-02T15:22:31,333 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742034_1210 (size=77755) 2024-12-02T15:22:31,343 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742035_1211 (size=131360) 2024-12-02T15:22:31,344 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742035_1211 (size=131360) 2024-12-02T15:22:31,344 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742035_1211 (size=131360) 2024-12-02T15:22:31,351 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742036_1212 (size=111793) 2024-12-02T15:22:31,352 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742036_1212 (size=111793) 2024-12-02T15:22:31,352 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742036_1212 (size=111793) 2024-12-02T15:22:31,368 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742037_1213 (size=1832290) 2024-12-02T15:22:31,368 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742037_1213 (size=1832290) 2024-12-02T15:22:31,368 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742037_1213 (size=1832290) 2024-12-02T15:22:31,398 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742038_1214 (size=8360005) 2024-12-02T15:22:31,398 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742038_1214 (size=8360005) 2024-12-02T15:22:31,399 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742038_1214 (size=8360005) 2024-12-02T15:22:31,407 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742039_1215 (size=503880) 2024-12-02T15:22:31,407 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742039_1215 (size=503880) 2024-12-02T15:22:31,407 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742039_1215 (size=503880) 2024-12-02T15:22:31,415 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742040_1216 (size=322274) 2024-12-02T15:22:31,417 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742040_1216 (size=322274) 2024-12-02T15:22:31,417 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742040_1216 (size=322274) 2024-12-02T15:22:31,425 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742041_1217 (size=20406) 2024-12-02T15:22:31,425 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742041_1217 (size=20406) 2024-12-02T15:22:31,425 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742041_1217 (size=20406) 2024-12-02T15:22:31,433 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742042_1218 (size=45609) 2024-12-02T15:22:31,433 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742042_1218 (size=45609) 2024-12-02T15:22:31,433 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742042_1218 (size=45609) 2024-12-02T15:22:31,455 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742043_1219 (size=136454) 2024-12-02T15:22:31,455 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742043_1219 (size=136454) 2024-12-02T15:22:31,456 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742043_1219 (size=136454) 2024-12-02T15:22:31,466 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742044_1220 (size=443171) 2024-12-02T15:22:31,468 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742044_1220 (size=443171) 2024-12-02T15:22:31,468 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742044_1220 (size=443171) 2024-12-02T15:22:31,480 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742045_1221 (size=1597136) 2024-12-02T15:22:31,480 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742045_1221 (size=1597136) 2024-12-02T15:22:31,480 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742045_1221 (size=1597136) 2024-12-02T15:22:31,489 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742046_1222 (size=30873) 2024-12-02T15:22:31,490 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742046_1222 (size=30873) 2024-12-02T15:22:31,490 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742046_1222 (size=30873) 2024-12-02T15:22:31,498 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742047_1223 (size=29229) 2024-12-02T15:22:31,498 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742047_1223 (size=29229) 2024-12-02T15:22:31,498 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742047_1223 (size=29229) 2024-12-02T15:22:31,507 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742048_1224 (size=903849) 2024-12-02T15:22:31,507 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742048_1224 (size=903849) 2024-12-02T15:22:31,507 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742048_1224 (size=903849) 2024-12-02T15:22:31,526 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742049_1225 (size=5175431) 2024-12-02T15:22:31,526 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742049_1225 (size=5175431) 2024-12-02T15:22:31,526 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742049_1225 (size=5175431) 2024-12-02T15:22:31,534 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742050_1226 (size=232881) 2024-12-02T15:22:31,535 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742050_1226 (size=232881) 2024-12-02T15:22:31,535 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742050_1226 (size=232881) 2024-12-02T15:22:31,545 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742051_1227 (size=1323991) 2024-12-02T15:22:31,545 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742051_1227 (size=1323991) 2024-12-02T15:22:31,545 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742051_1227 (size=1323991) 2024-12-02T15:22:31,568 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742052_1228 (size=4695811) 2024-12-02T15:22:31,568 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742052_1228 (size=4695811) 2024-12-02T15:22:31,568 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742052_1228 (size=4695811) 2024-12-02T15:22:31,583 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742053_1229 (size=1877034) 2024-12-02T15:22:31,583 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742053_1229 (size=1877034) 2024-12-02T15:22:31,583 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742053_1229 (size=1877034) 2024-12-02T15:22:31,603 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742054_1230 (size=6424745) 2024-12-02T15:22:31,603 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742054_1230 (size=6424745) 2024-12-02T15:22:31,603 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742054_1230 (size=6424745) 2024-12-02T15:22:31,610 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742055_1231 (size=217555) 2024-12-02T15:22:31,610 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742055_1231 (size=217555) 2024-12-02T15:22:31,610 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742055_1231 (size=217555) 2024-12-02T15:22:31,625 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742056_1232 (size=4188619) 
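
The TableMapReduceUtil(972) entries near the start of this stretch show the export job resolving, for each class it references, the local-repository jar that has to travel with the job; the addStoredBlock lines that follow are those jars being replicated across the three datanodes of the mini cluster. A minimal sketch of the kind of driver call that produces that resolution, assuming only a hypothetical job name and nothing else from this run:

```java
// Sketch only, not the test's actual driver code. Shows the call that emits the
// "For class X, using jar Y" lines while preparing a MapReduce job's classpath.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.mapreduce.Job;

public class DependencyJarSetup {
  public static Job newJob() throws Exception {
    Configuration conf = HBaseConfiguration.create();
    Job job = Job.getInstance(conf, "export-snapshot-sketch"); // hypothetical job name
    // For each class the job depends on (input/output formats, writables,
    // the partitioner, ...), locate its containing jar and add it to the
    // distributed cache so the map tasks can load it.
    TableMapReduceUtil.addDependencyJars(job);
    return job;
  }
}
```
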
2024-12-02T15:22:31,625 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742056_1232 (size=4188619) 2024-12-02T15:22:31,626 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742056_1232 (size=4188619) 2024-12-02T15:22:31,632 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742057_1233 (size=127628) 2024-12-02T15:22:31,633 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742057_1233 (size=127628) 2024-12-02T15:22:31,633 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742057_1233 (size=127628) 2024-12-02T15:22:31,634 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 2024-12-02T15:22:31,637 INFO [Time-limited test {}] snapshot.ExportSnapshot(663): Loading Snapshot 'snaptb0-testExportFileSystemState' hfile list 2024-12-02T15:22:31,639 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=0 size=8.1 K 2024-12-02T15:22:31,639 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=1 size=5.2 K 2024-12-02T15:22:31,646 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742058_1234 (size=447) 2024-12-02T15:22:31,647 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742058_1234 (size=447) 2024-12-02T15:22:31,647 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742058_1234 (size=447) 2024-12-02T15:22:31,653 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742059_1235 (size=21) 2024-12-02T15:22:31,653 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742059_1235 (size=21) 2024-12-02T15:22:31,654 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742059_1235 (size=21) 2024-12-02T15:22:31,666 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742060_1236 (size=304089) 2024-12-02T15:22:31,666 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742060_1236 (size=304089) 2024-12-02T15:22:31,666 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742060_1236 (size=304089) 2024-12-02T15:22:31,776 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-02T15:22:32,793 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-12-02T15:22:32,794 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-02T15:22:32,800 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733152780087_0003_000001 (auth:SIMPLE) from 127.0.0.1:52796 2024-12-02T15:22:32,811 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_126270/yarn-6841854118/MiniMRCluster_126270-localDir-nm-0_2/usercache/jenkins/appcache/application_1733152780087_0003/container_1733152780087_0003_01_000001/launch_container.sh] 2024-12-02T15:22:32,811 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_126270/yarn-6841854118/MiniMRCluster_126270-localDir-nm-0_2/usercache/jenkins/appcache/application_1733152780087_0003/container_1733152780087_0003_01_000001/container_tokens] 2024-12-02T15:22:32,811 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_126270/yarn-6841854118/MiniMRCluster_126270-localDir-nm-0_2/usercache/jenkins/appcache/application_1733152780087_0003/container_1733152780087_0003_01_000001/sysfs] 2024-12-02T15:22:33,093 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemState 2024-12-02T15:22:33,093 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemState Metrics about Tables on a single HBase RegionServer 2024-12-02T15:22:33,094 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testExportWithResetTtl 2024-12-02T15:22:33,094 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithResetTtl 2024-12-02T15:22:33,667 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733152780087_0004_000001 (auth:SIMPLE) from 127.0.0.1:37772 2024-12-02T15:22:33,692 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-02T15:22:37,820 DEBUG [master/be0458f36726:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 2886f811fadb2f0ee69adeabeb08886c changed from -1.0 to 0.0, refreshing cache 2024-12-02T15:22:37,820 DEBUG [master/be0458f36726:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 5ca96d90aef87e85184736f1ab25df26 changed from -1.0 to 0.0, refreshing cache 2024-12-02T15:22:38,538 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): 
Auth successful for appattempt_1733152780087_0004_000001 (auth:SIMPLE) from 127.0.0.1:55240 2024-12-02T15:22:38,597 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-02T15:22:38,956 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742061_1237 (size=349787) 2024-12-02T15:22:38,957 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742061_1237 (size=349787) 2024-12-02T15:22:38,957 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742061_1237 (size=349787) 2024-12-02T15:22:41,207 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733152780087_0004_000001 (auth:SIMPLE) from 127.0.0.1:37512 2024-12-02T15:22:41,246 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733152780087_0004_000001 (auth:SIMPLE) from 127.0.0.1:44036 2024-12-02T15:22:46,708 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742062_1238 (size=8258) 2024-12-02T15:22:46,708 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742062_1238 (size=8258) 2024-12-02T15:22:46,708 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742062_1238 (size=8258) 2024-12-02T15:22:46,834 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_126270/yarn-6841854118/MiniMRCluster_126270-localDir-nm-1_2/usercache/jenkins/appcache/application_1733152780087_0004/container_1733152780087_0004_01_000002/launch_container.sh] 2024-12-02T15:22:46,834 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_126270/yarn-6841854118/MiniMRCluster_126270-localDir-nm-1_2/usercache/jenkins/appcache/application_1733152780087_0004/container_1733152780087_0004_01_000002/container_tokens] 2024-12-02T15:22:46,834 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_126270/yarn-6841854118/MiniMRCluster_126270-localDir-nm-1_2/usercache/jenkins/appcache/application_1733152780087_0004/container_1733152780087_0004_01_000002/sysfs] 2024-12-02T15:22:47,677 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742064_1240 (size=5354) 2024-12-02T15:22:47,677 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742064_1240 (size=5354) 2024-12-02T15:22:47,677 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742064_1240 (size=5354) 2024-12-02T15:22:47,736 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742063_1239 (size=22168) 2024-12-02T15:22:47,736 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742063_1239 (size=22168) 2024-12-02T15:22:47,737 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742063_1239 (size=22168) 2024-12-02T15:22:47,751 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742065_1241 (size=466) 2024-12-02T15:22:47,751 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742065_1241 (size=466) 2024-12-02T15:22:47,751 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742065_1241 (size=466) 2024-12-02T15:22:47,783 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742066_1242 (size=22168) 2024-12-02T15:22:47,783 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742066_1242 (size=22168) 2024-12-02T15:22:47,783 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742066_1242 (size=22168) 2024-12-02T15:22:47,798 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742067_1243 (size=349787) 2024-12-02T15:22:47,798 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742067_1243 (size=349787) 2024-12-02T15:22:47,798 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742067_1243 (size=349787) 2024-12-02T15:22:47,812 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733152780087_0004_000001 (auth:SIMPLE) from 127.0.0.1:41504 2024-12-02T15:22:47,837 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_126270/yarn-6841854118/MiniMRCluster_126270-localDir-nm-0_2/usercache/jenkins/appcache/application_1733152780087_0004/container_1733152780087_0004_01_000003/launch_container.sh] 2024-12-02T15:22:47,837 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_126270/yarn-6841854118/MiniMRCluster_126270-localDir-nm-0_2/usercache/jenkins/appcache/application_1733152780087_0004/container_1733152780087_0004_01_000003/container_tokens] 2024-12-02T15:22:47,837 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_126270/yarn-6841854118/MiniMRCluster_126270-localDir-nm-0_2/usercache/jenkins/appcache/application_1733152780087_0004/container_1733152780087_0004_01_000003/sysfs] 2024-12-02T15:22:49,120 INFO [Time-limited test {}] 
snapshot.ExportSnapshot(1219): Finalize the Snapshot Export 2024-12-02T15:22:49,122 INFO [Time-limited test {}] snapshot.ExportSnapshot(1230): Verify the exported snapshot's expiration status and integrity. 2024-12-02T15:22:49,128 INFO [Time-limited test {}] snapshot.ExportSnapshot(1236): Export Completed: snaptb0-testExportFileSystemState 2024-12-02T15:22:49,128 INFO [Time-limited test {}] snapshot.TestExportSnapshot(409): Exported snapshot 2024-12-02T15:22:49,129 INFO [Time-limited test {}] snapshot.TestExportSnapshot(420): Verified filesystem state 2024-12-02T15:22:49,129 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-205930424_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/.hbase-snapshot/snaptb0-testExportFileSystemState at hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/.hbase-snapshot/snaptb0-testExportFileSystemState 2024-12-02T15:22:49,130 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/.hbase-snapshot/snaptb0-testExportFileSystemState/.snapshotinfo 2024-12-02T15:22:49,130 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/.hbase-snapshot/snaptb0-testExportFileSystemState/data.manifest 2024-12-02T15:22:49,130 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-205930424_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/export-test/export-1733152950233/.hbase-snapshot/snaptb0-testExportFileSystemState at hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/export-test/export-1733152950233/.hbase-snapshot/snaptb0-testExportFileSystemState 2024-12-02T15:22:49,130 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/export-test/export-1733152950233/.hbase-snapshot/snaptb0-testExportFileSystemState/.snapshotinfo 2024-12-02T15:22:49,130 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/export-test/export-1733152950233/.hbase-snapshot/snaptb0-testExportFileSystemState/data.manifest 2024-12-02T15:22:49,137 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testExportFileSystemState 2024-12-02T15:22:49,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] procedure2.ProcedureExecutor(1139): Stored pid=107, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemState 2024-12-02T15:22:49,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=107 2024-12-02T15:22:49,141 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733152969141"}]},"ts":"1733152969141"} 2024-12-02T15:22:49,143 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemState, 
state=DISABLING in hbase:meta 2024-12-02T15:22:49,143 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(284): Set testtb-testExportFileSystemState to state=DISABLING 2024-12-02T15:22:49,144 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=108, ppid=107, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemState}] 2024-12-02T15:22:49,145 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=109, ppid=108, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=2886f811fadb2f0ee69adeabeb08886c, UNASSIGN}, {pid=110, ppid=108, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=5ca96d90aef87e85184736f1ab25df26, UNASSIGN}] 2024-12-02T15:22:49,146 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=110, ppid=108, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=5ca96d90aef87e85184736f1ab25df26, UNASSIGN 2024-12-02T15:22:49,146 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=109, ppid=108, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=2886f811fadb2f0ee69adeabeb08886c, UNASSIGN 2024-12-02T15:22:49,147 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=110 updating hbase:meta row=5ca96d90aef87e85184736f1ab25df26, regionState=CLOSING, regionLocation=be0458f36726,45405,1733152773787 2024-12-02T15:22:49,147 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=109 updating hbase:meta row=2886f811fadb2f0ee69adeabeb08886c, regionState=CLOSING, regionLocation=be0458f36726,35155,1733152773848 2024-12-02T15:22:49,149 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=110, ppid=108, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=5ca96d90aef87e85184736f1ab25df26, UNASSIGN because future has completed 2024-12-02T15:22:49,149 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-02T15:22:49,149 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=111, ppid=110, state=RUNNABLE, hasLock=false; CloseRegionProcedure 5ca96d90aef87e85184736f1ab25df26, server=be0458f36726,45405,1733152773787}] 2024-12-02T15:22:49,149 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=109, ppid=108, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=2886f811fadb2f0ee69adeabeb08886c, UNASSIGN because future has completed 2024-12-02T15:22:49,151 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-02T15:22:49,151 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=112, ppid=109, state=RUNNABLE, hasLock=false; CloseRegionProcedure 2886f811fadb2f0ee69adeabeb08886c, server=be0458f36726,35155,1733152773848}] 
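
The "Export Completed: snaptb0-testExportFileSystemState" entry a little earlier marks the end of the snapshot export that the MapReduce job above carried out (two splits of 8.1 K and 5.2 K), after which the test verifies both the source and the exported copy. For reference, that export is driven by the ExportSnapshot tool; a sketch of an equivalent standalone invocation, assuming the standard -snapshot/-copy-to options and a hypothetical destination URI rather than anything taken from this run:

```java
// Sketch of a standalone ExportSnapshot run; the destination URI is a hypothetical
// placeholder, and the test itself invokes the tool programmatically instead.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public class ExportSnapshotSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
        "-snapshot", "snaptb0-testExportFileSystemState",
        "-copy-to", "hdfs://backup-namenode:8020/hbase-backups" // hypothetical target
    });
    System.exit(rc);
  }
}
```
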
2024-12-02T15:22:49,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=107 2024-12-02T15:22:49,304 INFO [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] handler.UnassignRegionHandler(122): Close 5ca96d90aef87e85184736f1ab25df26 2024-12-02T15:22:49,304 INFO [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] handler.UnassignRegionHandler(122): Close 2886f811fadb2f0ee69adeabeb08886c 2024-12-02T15:22:49,304 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-02T15:22:49,305 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-02T15:22:49,305 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] regionserver.HRegion(1722): Closing 5ca96d90aef87e85184736f1ab25df26, disabling compactions & flushes 2024-12-02T15:22:49,305 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] regionserver.HRegion(1722): Closing 2886f811fadb2f0ee69adeabeb08886c, disabling compactions & flushes 2024-12-02T15:22:49,305 INFO [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemState,1,1733152948904.5ca96d90aef87e85184736f1ab25df26. 2024-12-02T15:22:49,305 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemState,1,1733152948904.5ca96d90aef87e85184736f1ab25df26. 2024-12-02T15:22:49,305 INFO [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemState,,1733152948904.2886f811fadb2f0ee69adeabeb08886c. 2024-12-02T15:22:49,305 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemState,1,1733152948904.5ca96d90aef87e85184736f1ab25df26. after waiting 0 ms 2024-12-02T15:22:49,305 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemState,,1733152948904.2886f811fadb2f0ee69adeabeb08886c. 2024-12-02T15:22:49,305 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemState,1,1733152948904.5ca96d90aef87e85184736f1ab25df26. 2024-12-02T15:22:49,306 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemState,,1733152948904.2886f811fadb2f0ee69adeabeb08886c. after waiting 0 ms 2024-12-02T15:22:49,306 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemState,,1733152948904.2886f811fadb2f0ee69adeabeb08886c. 
2024-12-02T15:22:49,316 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportFileSystemState/5ca96d90aef87e85184736f1ab25df26/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-02T15:22:49,316 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportFileSystemState/2886f811fadb2f0ee69adeabeb08886c/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-02T15:22:49,317 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-02T15:22:49,317 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-02T15:22:49,317 INFO [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemState,,1733152948904.2886f811fadb2f0ee69adeabeb08886c. 2024-12-02T15:22:49,317 INFO [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemState,1,1733152948904.5ca96d90aef87e85184736f1ab25df26. 2024-12-02T15:22:49,317 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] regionserver.HRegion(1676): Region close journal for 5ca96d90aef87e85184736f1ab25df26: Waiting for close lock at 1733152969305Running coprocessor pre-close hooks at 1733152969305Disabling compacts and flushes for region at 1733152969305Disabling writes for close at 1733152969305Writing region close event to WAL at 1733152969307 (+2 ms)Running coprocessor post-close hooks at 1733152969317 (+10 ms)Closed at 1733152969317 2024-12-02T15:22:49,317 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] regionserver.HRegion(1676): Region close journal for 2886f811fadb2f0ee69adeabeb08886c: Waiting for close lock at 1733152969305Running coprocessor pre-close hooks at 1733152969305Disabling compacts and flushes for region at 1733152969305Disabling writes for close at 1733152969306 (+1 ms)Writing region close event to WAL at 1733152969307 (+1 ms)Running coprocessor post-close hooks at 1733152969317 (+10 ms)Closed at 1733152969317 2024-12-02T15:22:49,319 INFO [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] handler.UnassignRegionHandler(157): Closed 5ca96d90aef87e85184736f1ab25df26 2024-12-02T15:22:49,320 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=110 updating hbase:meta row=5ca96d90aef87e85184736f1ab25df26, regionState=CLOSED 2024-12-02T15:22:49,320 INFO [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] handler.UnassignRegionHandler(157): Closed 2886f811fadb2f0ee69adeabeb08886c 2024-12-02T15:22:49,320 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=109 updating hbase:meta row=2886f811fadb2f0ee69adeabeb08886c, regionState=CLOSED 2024-12-02T15:22:49,322 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] 
procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=111, ppid=110, state=RUNNABLE, hasLock=false; CloseRegionProcedure 5ca96d90aef87e85184736f1ab25df26, server=be0458f36726,45405,1733152773787 because future has completed 2024-12-02T15:22:49,323 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=112, ppid=109, state=RUNNABLE, hasLock=false; CloseRegionProcedure 2886f811fadb2f0ee69adeabeb08886c, server=be0458f36726,35155,1733152773848 because future has completed 2024-12-02T15:22:49,325 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=111, resume processing ppid=110 2024-12-02T15:22:49,325 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=112, resume processing ppid=109 2024-12-02T15:22:49,325 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=111, ppid=110, state=SUCCESS, hasLock=false; CloseRegionProcedure 5ca96d90aef87e85184736f1ab25df26, server=be0458f36726,45405,1733152773787 in 174 msec 2024-12-02T15:22:49,325 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=112, ppid=109, state=SUCCESS, hasLock=false; CloseRegionProcedure 2886f811fadb2f0ee69adeabeb08886c, server=be0458f36726,35155,1733152773848 in 173 msec 2024-12-02T15:22:49,326 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=110, ppid=108, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=5ca96d90aef87e85184736f1ab25df26, UNASSIGN in 180 msec 2024-12-02T15:22:49,327 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=109, resume processing ppid=108 2024-12-02T15:22:49,327 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=109, ppid=108, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=2886f811fadb2f0ee69adeabeb08886c, UNASSIGN in 180 msec 2024-12-02T15:22:49,328 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=108, resume processing ppid=107 2024-12-02T15:22:49,328 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=108, ppid=107, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemState in 183 msec 2024-12-02T15:22:49,329 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733152969329"}]},"ts":"1733152969329"} 2024-12-02T15:22:49,331 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemState, state=DISABLED in hbase:meta 2024-12-02T15:22:49,331 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(296): Set testtb-testExportFileSystemState to state=DISABLED 2024-12-02T15:22:49,333 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=107, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemState in 195 msec 2024-12-02T15:22:49,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=107 2024-12-02T15:22:49,454 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testExportFileSystemState completed 2024-12-02T15:22:49,454 INFO 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testExportFileSystemState 2024-12-02T15:22:49,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] procedure2.ProcedureExecutor(1139): Stored pid=113, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemState 2024-12-02T15:22:49,456 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=113, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemState 2024-12-02T15:22:49,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportFileSystemState 2024-12-02T15:22:49,457 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=113, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemState 2024-12-02T15:22:49,460 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43025 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportFileSystemState 2024-12-02T15:22:49,461 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportFileSystemState/2886f811fadb2f0ee69adeabeb08886c 2024-12-02T15:22:49,461 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportFileSystemState/5ca96d90aef87e85184736f1ab25df26 2024-12-02T15:22:49,464 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportFileSystemState/5ca96d90aef87e85184736f1ab25df26/cf, FileablePath, hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportFileSystemState/5ca96d90aef87e85184736f1ab25df26/recovered.edits] 2024-12-02T15:22:49,464 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportFileSystemState/2886f811fadb2f0ee69adeabeb08886c/cf, FileablePath, hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportFileSystemState/2886f811fadb2f0ee69adeabeb08886c/recovered.edits] 2024-12-02T15:22:49,477 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportFileSystemState/2886f811fadb2f0ee69adeabeb08886c/cf/faf420796b764effa37d40b22a4fd4a7 to hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/archive/data/default/testtb-testExportFileSystemState/2886f811fadb2f0ee69adeabeb08886c/cf/faf420796b764effa37d40b22a4fd4a7 2024-12-02T15:22:49,479 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportFileSystemState/5ca96d90aef87e85184736f1ab25df26/cf/9de4fe0c5d3d44099d828ad299dd7055 to 
hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/archive/data/default/testtb-testExportFileSystemState/5ca96d90aef87e85184736f1ab25df26/cf/9de4fe0c5d3d44099d828ad299dd7055 2024-12-02T15:22:49,480 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportFileSystemState/2886f811fadb2f0ee69adeabeb08886c/recovered.edits/9.seqid to hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/archive/data/default/testtb-testExportFileSystemState/2886f811fadb2f0ee69adeabeb08886c/recovered.edits/9.seqid 2024-12-02T15:22:49,480 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportFileSystemState/2886f811fadb2f0ee69adeabeb08886c 2024-12-02T15:22:49,481 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportFileSystemState/5ca96d90aef87e85184736f1ab25df26/recovered.edits/9.seqid to hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/archive/data/default/testtb-testExportFileSystemState/5ca96d90aef87e85184736f1ab25df26/recovered.edits/9.seqid 2024-12-02T15:22:49,481 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportFileSystemState/5ca96d90aef87e85184736f1ab25df26 2024-12-02T15:22:49,481 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportFileSystemState regions 2024-12-02T15:22:49,483 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=113, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemState 2024-12-02T15:22:49,484 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34441-0x10197ce54a90000, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-12-02T15:22:49,484 DEBUG [pool-69-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45405-0x10197ce54a90002, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-12-02T15:22:49,484 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35155-0x10197ce54a90003, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-12-02T15:22:49,484 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43025-0x10197ce54a90001, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-12-02T15:22:49,486 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF 2024-12-02T15:22:49,486 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data 
PBUF 2024-12-02T15:22:49,486 WARN [PEWorker-4 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testExportFileSystemState from hbase:meta 2024-12-02T15:22:49,486 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF 2024-12-02T15:22:49,486 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF 2024-12-02T15:22:49,489 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testExportFileSystemState' descriptor. 2024-12-02T15:22:49,492 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=113, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemState 2024-12-02T15:22:49,492 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35155-0x10197ce54a90003, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-12-02T15:22:49,492 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43025-0x10197ce54a90001, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-12-02T15:22:49,493 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43025-0x10197ce54a90001, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T15:22:49,493 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34441-0x10197ce54a90000, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-12-02T15:22:49,493 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testExportFileSystemState' from region states. 
2024-12-02T15:22:49,493 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35155-0x10197ce54a90003, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T15:22:49,493 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34441-0x10197ce54a90000, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T15:22:49,493 DEBUG [pool-69-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45405-0x10197ce54a90002, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-12-02T15:22:49,493 DEBUG [pool-69-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45405-0x10197ce54a90002, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T15:22:49,493 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemState,,1733152948904.2886f811fadb2f0ee69adeabeb08886c.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733152969493"}]},"ts":"9223372036854775807"} 2024-12-02T15:22:49,493 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemState,1,1733152948904.5ca96d90aef87e85184736f1ab25df26.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733152969493"}]},"ts":"9223372036854775807"} 2024-12-02T15:22:49,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=113 2024-12-02T15:22:49,496 INFO [PEWorker-4 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-12-02T15:22:49,496 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => 2886f811fadb2f0ee69adeabeb08886c, NAME => 'testtb-testExportFileSystemState,,1733152948904.2886f811fadb2f0ee69adeabeb08886c.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 5ca96d90aef87e85184736f1ab25df26, NAME => 'testtb-testExportFileSystemState,1,1733152948904.5ca96d90aef87e85184736f1ab25df26.', STARTKEY => '1', ENDKEY => ''}] 2024-12-02T15:22:49,496 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testExportFileSystemState' as deleted. 
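
The DisableTableProcedure (pid=107) and DeleteTableProcedure (pid=113) traced above, together with the snapshot deletions that follow, are the server-side counterparts of an ordinary client-side teardown. A minimal sketch of that teardown, assuming only the table and snapshot names visible in this log:

```java
// Sketch of the client-side cleanup whose server-side procedures appear in the log:
// disable the table, delete it, then drop the two snapshots taken for the test.
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class TestTableCleanupSketch {
  public static void main(String[] args) throws Exception {
    TableName table = TableName.valueOf("testtb-testExportFileSystemState");
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Triggers DisableTableProcedure / DeleteTableProcedure on the master.
      if (admin.isTableEnabled(table)) {
        admin.disableTable(table);
      }
      admin.deleteTable(table);
      // Matches the "delete name: ..." snapshot requests near the end of the log.
      admin.deleteSnapshot("emptySnaptb0-testExportFileSystemState");
      admin.deleteSnapshot("snaptb0-testExportFileSystemState");
    }
  }
}
```
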
2024-12-02T15:22:49,496 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733152969496"}]},"ts":"9223372036854775807"} 2024-12-02T15:22:49,499 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportFileSystemState state from META 2024-12-02T15:22:49,500 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(133): Finished pid=113, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemState 2024-12-02T15:22:49,501 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=113, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemState in 46 msec 2024-12-02T15:22:49,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=113 2024-12-02T15:22:49,603 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportFileSystemState 2024-12-02T15:22:49,603 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportFileSystemState completed 2024-12-02T15:22:49,609 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportFileSystemState" type: DISABLED 2024-12-02T15:22:49,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testExportFileSystemState 2024-12-02T15:22:49,613 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportFileSystemState" type: DISABLED 2024-12-02T15:22:49,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportFileSystemState 2024-12-02T15:22:49,632 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestSecureExportSnapshot#testExportFileSystemState Thread=805 (was 803) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-205930424_22 at /127.0.0.1:57608 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) 
app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MoveIntermediateToDone Thread #0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: process reaper (pid 121506) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (834363022) connection to localhost/127.0.0.1:43429 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_2033293157_1 at /127.0.0.1:37346 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:43429 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LogDeleter #2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1177) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (834363022) connection to localhost/127.0.0.1:42461 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: MoveIntermediateToDone Thread #1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-13 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_2033293157_1 at /127.0.0.1:57580 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-205930424_22 at /127.0.0.1:37360 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) 
java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Thread-3591 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: MoveIntermediateToDone Thread #2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-12 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-205930424_22 at /127.0.0.1:34214 [Waiting for operation #6] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=813 (was 815), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=671 (was 661) - SystemLoadAverage LEAK? 
-, ProcessCount=17 (was 17), AvailableMemoryMB=1807 (was 2037) 2024-12-02T15:22:49,633 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=805 is superior to 500 2024-12-02T15:22:49,648 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestSecureExportSnapshot#testConsecutiveExports Thread=805, OpenFileDescriptor=813, MaxFileDescriptor=1048576, SystemLoadAverage=671, ProcessCount=17, AvailableMemoryMB=1806 2024-12-02T15:22:49,648 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=805 is superior to 500 2024-12-02T15:22:49,650 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testConsecutiveExports', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-02T15:22:49,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] procedure2.ProcedureExecutor(1139): Stored pid=114, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testConsecutiveExports 2024-12-02T15:22:49,652 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=114, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testConsecutiveExports execute state=CREATE_TABLE_PRE_OPERATION 2024-12-02T15:22:49,652 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T15:22:49,652 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testConsecutiveExports" procId is: 114 2024-12-02T15:22:49,653 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=114, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testConsecutiveExports execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-02T15:22:49,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=114 2024-12-02T15:22:49,659 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742068_1244 (size=404) 2024-12-02T15:22:49,659 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742068_1244 (size=404) 2024-12-02T15:22:49,659 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742068_1244 (size=404) 2024-12-02T15:22:49,661 INFO [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 6ee1e20048f6403ae2d1a265da61a290, NAME => 'testtb-testConsecutiveExports,,1733152969649.6ee1e20048f6403ae2d1a265da61a290.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testConsecutiveExports', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS 
=> 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657 2024-12-02T15:22:49,665 INFO [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1a2e837a85dcfb13b32ecdc2cd231ae0, NAME => 'testtb-testConsecutiveExports,1,1733152969649.1a2e837a85dcfb13b32ecdc2cd231ae0.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testConsecutiveExports', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657 2024-12-02T15:22:49,674 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742069_1245 (size=65) 2024-12-02T15:22:49,674 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742069_1245 (size=65) 2024-12-02T15:22:49,674 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742069_1245 (size=65) 2024-12-02T15:22:49,675 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testConsecutiveExports,,1733152969649.6ee1e20048f6403ae2d1a265da61a290.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T15:22:49,675 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1722): Closing 6ee1e20048f6403ae2d1a265da61a290, disabling compactions & flushes 2024-12-02T15:22:49,675 INFO [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testConsecutiveExports,,1733152969649.6ee1e20048f6403ae2d1a265da61a290. 2024-12-02T15:22:49,675 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testConsecutiveExports,,1733152969649.6ee1e20048f6403ae2d1a265da61a290. 2024-12-02T15:22:49,675 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testConsecutiveExports,,1733152969649.6ee1e20048f6403ae2d1a265da61a290. after waiting 0 ms 2024-12-02T15:22:49,675 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testConsecutiveExports,,1733152969649.6ee1e20048f6403ae2d1a265da61a290. 2024-12-02T15:22:49,675 INFO [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testConsecutiveExports,,1733152969649.6ee1e20048f6403ae2d1a265da61a290. 
2024-12-02T15:22:49,675 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1676): Region close journal for 6ee1e20048f6403ae2d1a265da61a290: Waiting for close lock at 1733152969675Disabling compacts and flushes for region at 1733152969675Disabling writes for close at 1733152969675Writing region close event to WAL at 1733152969675Closed at 1733152969675 2024-12-02T15:22:49,676 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742070_1246 (size=65) 2024-12-02T15:22:49,677 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742070_1246 (size=65) 2024-12-02T15:22:49,677 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742070_1246 (size=65) 2024-12-02T15:22:49,677 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testConsecutiveExports,1,1733152969649.1a2e837a85dcfb13b32ecdc2cd231ae0.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T15:22:49,677 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1722): Closing 1a2e837a85dcfb13b32ecdc2cd231ae0, disabling compactions & flushes 2024-12-02T15:22:49,677 INFO [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testConsecutiveExports,1,1733152969649.1a2e837a85dcfb13b32ecdc2cd231ae0. 2024-12-02T15:22:49,677 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testConsecutiveExports,1,1733152969649.1a2e837a85dcfb13b32ecdc2cd231ae0. 2024-12-02T15:22:49,677 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testConsecutiveExports,1,1733152969649.1a2e837a85dcfb13b32ecdc2cd231ae0. after waiting 0 ms 2024-12-02T15:22:49,677 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testConsecutiveExports,1,1733152969649.1a2e837a85dcfb13b32ecdc2cd231ae0. 2024-12-02T15:22:49,677 INFO [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testConsecutiveExports,1,1733152969649.1a2e837a85dcfb13b32ecdc2cd231ae0. 
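
[Editor's note] The CreateTableProcedure entries above record table 'testtb-testConsecutiveExports' with a single column family 'cf' keeping one version, REGION_REPLICATION => '1', and two initial regions split at row key '1'. The sketch below shows how an equivalent create could be issued through the HBase Admin API; it is illustrative only, not code from this test run, and the connection setup and class name are assumptions.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateTestTableSketch {
  public static void main(String[] args) throws Exception {
    // Assumption: a default client Configuration pointing at the cluster under test.
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName name = TableName.valueOf("testtb-testConsecutiveExports");
      // Single column family 'cf' with one version, matching the descriptor in the log.
      TableDescriptorBuilder table = TableDescriptorBuilder.newBuilder(name)
          .setRegionReplication(1)
          .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
              .setMaxVersions(1)
              .build());
      // One split key '1' yields the two regions logged above: ('', '1') and ('1', '').
      byte[][] splitKeys = new byte[][] { Bytes.toBytes("1") };
      admin.createTable(table.build(), splitKeys);
    }
  }
}
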
2024-12-02T15:22:49,677 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1676): Region close journal for 1a2e837a85dcfb13b32ecdc2cd231ae0: Waiting for close lock at 1733152969677Disabling compacts and flushes for region at 1733152969677Disabling writes for close at 1733152969677Writing region close event to WAL at 1733152969677Closed at 1733152969677 2024-12-02T15:22:49,678 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=114, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testConsecutiveExports execute state=CREATE_TABLE_ADD_TO_META 2024-12-02T15:22:49,679 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testConsecutiveExports,,1733152969649.6ee1e20048f6403ae2d1a265da61a290.","families":{"info":[{"qualifier":"regioninfo","vlen":64,"tag":[],"timestamp":"1733152969678"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733152969678"}]},"ts":"1733152969678"} 2024-12-02T15:22:49,679 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testConsecutiveExports,1,1733152969649.1a2e837a85dcfb13b32ecdc2cd231ae0.","families":{"info":[{"qualifier":"regioninfo","vlen":64,"tag":[],"timestamp":"1733152969678"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733152969678"}]},"ts":"1733152969678"} 2024-12-02T15:22:49,681 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-12-02T15:22:49,681 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=114, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testConsecutiveExports execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-02T15:22:49,682 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testConsecutiveExports","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733152969681"}]},"ts":"1733152969681"} 2024-12-02T15:22:49,683 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testConsecutiveExports, state=ENABLING in hbase:meta 2024-12-02T15:22:49,683 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(204): Hosts are {be0458f36726=0} racks are {/default-rack=0} 2024-12-02T15:22:49,685 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-02T15:22:49,685 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-02T15:22:49,685 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-02T15:22:49,685 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-02T15:22:49,685 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-02T15:22:49,685 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-02T15:22:49,685 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-02T15:22:49,685 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-02T15:22:49,685 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-02T15:22:49,685 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-02T15:22:49,685 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=115, ppid=114, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; 
TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=6ee1e20048f6403ae2d1a265da61a290, ASSIGN}, {pid=116, ppid=114, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=1a2e837a85dcfb13b32ecdc2cd231ae0, ASSIGN}] 2024-12-02T15:22:49,687 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=116, ppid=114, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=1a2e837a85dcfb13b32ecdc2cd231ae0, ASSIGN 2024-12-02T15:22:49,687 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=115, ppid=114, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=6ee1e20048f6403ae2d1a265da61a290, ASSIGN 2024-12-02T15:22:49,687 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(269): Starting pid=115, ppid=114, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=6ee1e20048f6403ae2d1a265da61a290, ASSIGN; state=OFFLINE, location=be0458f36726,43025,1733152773677; forceNewPlan=false, retain=false 2024-12-02T15:22:49,687 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=116, ppid=114, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=1a2e837a85dcfb13b32ecdc2cd231ae0, ASSIGN; state=OFFLINE, location=be0458f36726,35155,1733152773848; forceNewPlan=false, retain=false 2024-12-02T15:22:49,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=114 2024-12-02T15:22:49,838 INFO [be0458f36726:34441 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
2024-12-02T15:22:49,838 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=115 updating hbase:meta row=6ee1e20048f6403ae2d1a265da61a290, regionState=OPENING, regionLocation=be0458f36726,43025,1733152773677 2024-12-02T15:22:49,838 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=116 updating hbase:meta row=1a2e837a85dcfb13b32ecdc2cd231ae0, regionState=OPENING, regionLocation=be0458f36726,35155,1733152773848 2024-12-02T15:22:49,840 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=115, ppid=114, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=6ee1e20048f6403ae2d1a265da61a290, ASSIGN because future has completed 2024-12-02T15:22:49,840 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=117, ppid=115, state=RUNNABLE, hasLock=false; OpenRegionProcedure 6ee1e20048f6403ae2d1a265da61a290, server=be0458f36726,43025,1733152773677}] 2024-12-02T15:22:49,841 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=116, ppid=114, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=1a2e837a85dcfb13b32ecdc2cd231ae0, ASSIGN because future has completed 2024-12-02T15:22:49,841 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=118, ppid=116, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1a2e837a85dcfb13b32ecdc2cd231ae0, server=be0458f36726,35155,1733152773848}] 2024-12-02T15:22:49,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=114 2024-12-02T15:22:49,995 INFO [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] handler.AssignRegionHandler(132): Open testtb-testConsecutiveExports,1,1733152969649.1a2e837a85dcfb13b32ecdc2cd231ae0. 2024-12-02T15:22:49,995 INFO [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] handler.AssignRegionHandler(132): Open testtb-testConsecutiveExports,,1733152969649.6ee1e20048f6403ae2d1a265da61a290. 2024-12-02T15:22:49,995 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(7752): Opening region: {ENCODED => 1a2e837a85dcfb13b32ecdc2cd231ae0, NAME => 'testtb-testConsecutiveExports,1,1733152969649.1a2e837a85dcfb13b32ecdc2cd231ae0.', STARTKEY => '1', ENDKEY => ''} 2024-12-02T15:22:49,995 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(7752): Opening region: {ENCODED => 6ee1e20048f6403ae2d1a265da61a290, NAME => 'testtb-testConsecutiveExports,,1733152969649.6ee1e20048f6403ae2d1a265da61a290.', STARTKEY => '', ENDKEY => '1'} 2024-12-02T15:22:49,996 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testConsecutiveExports,,1733152969649.6ee1e20048f6403ae2d1a265da61a290. service=AccessControlService 2024-12-02T15:22:49,996 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testConsecutiveExports,1,1733152969649.1a2e837a85dcfb13b32ecdc2cd231ae0. 
service=AccessControlService 2024-12-02T15:22:49,996 INFO [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-02T15:22:49,996 INFO [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-02T15:22:49,996 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testConsecutiveExports 6ee1e20048f6403ae2d1a265da61a290 2024-12-02T15:22:49,996 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testConsecutiveExports 1a2e837a85dcfb13b32ecdc2cd231ae0 2024-12-02T15:22:49,996 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(898): Instantiated testtb-testConsecutiveExports,,1733152969649.6ee1e20048f6403ae2d1a265da61a290.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T15:22:49,996 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(898): Instantiated testtb-testConsecutiveExports,1,1733152969649.1a2e837a85dcfb13b32ecdc2cd231ae0.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T15:22:49,996 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(7794): checking encryption for 6ee1e20048f6403ae2d1a265da61a290 2024-12-02T15:22:49,996 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(7797): checking classloading for 6ee1e20048f6403ae2d1a265da61a290 2024-12-02T15:22:49,996 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(7794): checking encryption for 1a2e837a85dcfb13b32ecdc2cd231ae0 2024-12-02T15:22:49,996 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(7797): checking classloading for 1a2e837a85dcfb13b32ecdc2cd231ae0 2024-12-02T15:22:49,997 INFO [StoreOpener-6ee1e20048f6403ae2d1a265da61a290-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 6ee1e20048f6403ae2d1a265da61a290 2024-12-02T15:22:49,997 INFO [StoreOpener-1a2e837a85dcfb13b32ecdc2cd231ae0-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 1a2e837a85dcfb13b32ecdc2cd231ae0 2024-12-02T15:22:49,999 INFO [StoreOpener-1a2e837a85dcfb13b32ecdc2cd231ae0-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files 
[minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1a2e837a85dcfb13b32ecdc2cd231ae0 columnFamilyName cf 2024-12-02T15:22:49,999 INFO [StoreOpener-6ee1e20048f6403ae2d1a265da61a290-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 6ee1e20048f6403ae2d1a265da61a290 columnFamilyName cf 2024-12-02T15:22:49,999 DEBUG [StoreOpener-1a2e837a85dcfb13b32ecdc2cd231ae0-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T15:22:49,999 DEBUG [StoreOpener-6ee1e20048f6403ae2d1a265da61a290-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T15:22:49,999 INFO [StoreOpener-1a2e837a85dcfb13b32ecdc2cd231ae0-1 {}] regionserver.HStore(327): Store=1a2e837a85dcfb13b32ecdc2cd231ae0/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T15:22:49,999 INFO [StoreOpener-6ee1e20048f6403ae2d1a265da61a290-1 {}] regionserver.HStore(327): Store=6ee1e20048f6403ae2d1a265da61a290/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T15:22:49,999 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(1038): replaying wal for 6ee1e20048f6403ae2d1a265da61a290 2024-12-02T15:22:50,000 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(1038): replaying wal for 1a2e837a85dcfb13b32ecdc2cd231ae0 2024-12-02T15:22:50,000 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testConsecutiveExports/6ee1e20048f6403ae2d1a265da61a290 2024-12-02T15:22:50,000 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testConsecutiveExports/1a2e837a85dcfb13b32ecdc2cd231ae0 2024-12-02T15:22:50,000 DEBUG 
[RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testConsecutiveExports/6ee1e20048f6403ae2d1a265da61a290 2024-12-02T15:22:50,000 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testConsecutiveExports/1a2e837a85dcfb13b32ecdc2cd231ae0 2024-12-02T15:22:50,001 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(1048): stopping wal replay for 6ee1e20048f6403ae2d1a265da61a290 2024-12-02T15:22:50,001 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(1060): Cleaning up temporary data for 6ee1e20048f6403ae2d1a265da61a290 2024-12-02T15:22:50,001 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(1048): stopping wal replay for 1a2e837a85dcfb13b32ecdc2cd231ae0 2024-12-02T15:22:50,001 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(1060): Cleaning up temporary data for 1a2e837a85dcfb13b32ecdc2cd231ae0 2024-12-02T15:22:50,002 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(1093): writing seq id for 6ee1e20048f6403ae2d1a265da61a290 2024-12-02T15:22:50,002 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(1093): writing seq id for 1a2e837a85dcfb13b32ecdc2cd231ae0 2024-12-02T15:22:50,004 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testConsecutiveExports/6ee1e20048f6403ae2d1a265da61a290/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-02T15:22:50,004 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testConsecutiveExports/1a2e837a85dcfb13b32ecdc2cd231ae0/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-02T15:22:50,004 INFO [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(1114): Opened 6ee1e20048f6403ae2d1a265da61a290; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=63404114, jitterRate=-0.055205076932907104}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-02T15:22:50,005 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 6ee1e20048f6403ae2d1a265da61a290 2024-12-02T15:22:50,005 INFO [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(1114): Opened 1a2e837a85dcfb13b32ecdc2cd231ae0; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, 
ConstantSizeRegionSplitPolicy{desiredMaxFileSize=71716763, jitterRate=0.06866304576396942}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-02T15:22:50,005 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1a2e837a85dcfb13b32ecdc2cd231ae0 2024-12-02T15:22:50,005 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(1006): Region open journal for 6ee1e20048f6403ae2d1a265da61a290: Running coprocessor pre-open hook at 1733152969996Writing region info on filesystem at 1733152969996Initializing all the Stores at 1733152969997 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733152969997Cleaning up temporary data from old regions at 1733152970001 (+4 ms)Running coprocessor post-open hooks at 1733152970005 (+4 ms)Region opened successfully at 1733152970005 2024-12-02T15:22:50,005 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(1006): Region open journal for 1a2e837a85dcfb13b32ecdc2cd231ae0: Running coprocessor pre-open hook at 1733152969996Writing region info on filesystem at 1733152969996Initializing all the Stores at 1733152969997 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733152969997Cleaning up temporary data from old regions at 1733152970001 (+4 ms)Running coprocessor post-open hooks at 1733152970005 (+4 ms)Region opened successfully at 1733152970005 2024-12-02T15:22:50,006 INFO [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testConsecutiveExports,,1733152969649.6ee1e20048f6403ae2d1a265da61a290., pid=117, masterSystemTime=1733152969992 2024-12-02T15:22:50,006 INFO [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testConsecutiveExports,1,1733152969649.1a2e837a85dcfb13b32ecdc2cd231ae0., pid=118, masterSystemTime=1733152969992 2024-12-02T15:22:50,008 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testConsecutiveExports,1,1733152969649.1a2e837a85dcfb13b32ecdc2cd231ae0. 2024-12-02T15:22:50,008 INFO [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] handler.AssignRegionHandler(153): Opened testtb-testConsecutiveExports,1,1733152969649.1a2e837a85dcfb13b32ecdc2cd231ae0. 
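
[Editor's note] At this point the TransitRegionStateProcedure/OpenRegionProcedure entries show both regions opened, 6ee1e20048f6403ae2d1a265da61a290 on be0458f36726,43025 and 1a2e837a85dcfb13b32ecdc2cd231ae0 on be0458f36726,35155. The sketch below is one way a client could confirm those locations with a RegionLocator; it is illustrative only and assumes a default client Configuration.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;

public class ShowRegionLocationsSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         RegionLocator locator = conn.getRegionLocator(
             TableName.valueOf("testtb-testConsecutiveExports"))) {
      // Each location pairs an encoded region name with its hosting region server,
      // e.g. 6ee1e20048f6403ae2d1a265da61a290 -> be0458f36726,43025,... per the log above.
      for (HRegionLocation loc : locator.getAllRegionLocations()) {
        System.out.println(loc.getRegion().getEncodedName() + " -> " + loc.getServerName());
      }
    }
  }
}
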
2024-12-02T15:22:50,008 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=116 updating hbase:meta row=1a2e837a85dcfb13b32ecdc2cd231ae0, regionState=OPEN, openSeqNum=2, regionLocation=be0458f36726,35155,1733152773848 2024-12-02T15:22:50,008 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testConsecutiveExports,,1733152969649.6ee1e20048f6403ae2d1a265da61a290. 2024-12-02T15:22:50,008 INFO [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] handler.AssignRegionHandler(153): Opened testtb-testConsecutiveExports,,1733152969649.6ee1e20048f6403ae2d1a265da61a290. 2024-12-02T15:22:50,009 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=115 updating hbase:meta row=6ee1e20048f6403ae2d1a265da61a290, regionState=OPEN, openSeqNum=2, regionLocation=be0458f36726,43025,1733152773677 2024-12-02T15:22:50,010 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=118, ppid=116, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1a2e837a85dcfb13b32ecdc2cd231ae0, server=be0458f36726,35155,1733152773848 because future has completed 2024-12-02T15:22:50,011 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=117, ppid=115, state=RUNNABLE, hasLock=false; OpenRegionProcedure 6ee1e20048f6403ae2d1a265da61a290, server=be0458f36726,43025,1733152773677 because future has completed 2024-12-02T15:22:50,012 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=118, resume processing ppid=116 2024-12-02T15:22:50,012 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=118, ppid=116, state=SUCCESS, hasLock=false; OpenRegionProcedure 1a2e837a85dcfb13b32ecdc2cd231ae0, server=be0458f36726,35155,1733152773848 in 170 msec 2024-12-02T15:22:50,013 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=117, resume processing ppid=115 2024-12-02T15:22:50,013 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=117, ppid=115, state=SUCCESS, hasLock=false; OpenRegionProcedure 6ee1e20048f6403ae2d1a265da61a290, server=be0458f36726,43025,1733152773677 in 171 msec 2024-12-02T15:22:50,014 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=116, ppid=114, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=1a2e837a85dcfb13b32ecdc2cd231ae0, ASSIGN in 327 msec 2024-12-02T15:22:50,014 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=115, resume processing ppid=114 2024-12-02T15:22:50,014 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=115, ppid=114, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=6ee1e20048f6403ae2d1a265da61a290, ASSIGN in 328 msec 2024-12-02T15:22:50,015 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=114, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testConsecutiveExports execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-02T15:22:50,015 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testConsecutiveExports","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733152970015"}]},"ts":"1733152970015"} 2024-12-02T15:22:50,017 INFO 
[PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testConsecutiveExports, state=ENABLED in hbase:meta 2024-12-02T15:22:50,017 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=114, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testConsecutiveExports execute state=CREATE_TABLE_POST_OPERATION 2024-12-02T15:22:50,017 DEBUG [PEWorker-3 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testConsecutiveExports jenkins: RWXCA 2024-12-02T15:22:50,020 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43025 {}] access.PermissionStorage(613): Read acl: entry[testtb-testConsecutiveExports], kv [jenkins: RWXCA] 2024-12-02T15:22:50,051 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35155-0x10197ce54a90003, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T15:22:50,051 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34441-0x10197ce54a90000, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T15:22:50,051 DEBUG [pool-69-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45405-0x10197ce54a90002, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T15:22:50,051 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43025-0x10197ce54a90001, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T15:22:50,060 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testConsecutiveExports \x00 \x01 \x02 \x03 \x04 2024-12-02T15:22:50,060 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testConsecutiveExports \x00 \x01 \x02 \x03 \x04 2024-12-02T15:22:50,060 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testConsecutiveExports \x00 \x01 \x02 \x03 \x04 2024-12-02T15:22:50,060 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testConsecutiveExports \x00 \x01 \x02 \x03 \x04 2024-12-02T15:22:50,061 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=114, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testConsecutiveExports in 409 msec 2024-12-02T15:22:50,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=114 2024-12-02T15:22:50,283 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testConsecutiveExports completed 2024-12-02T15:22:50,284 DEBUG [Time-limited test {}] 
hbase.HBaseTestingUtil(3046): Waiting until all regions of table testtb-testConsecutiveExports get assigned. Timeout = 60000ms 2024-12-02T15:22:50,284 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-02T15:22:50,287 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table testtb-testConsecutiveExports assigned to meta. Checking AM states. 2024-12-02T15:22:50,287 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-02T15:22:50,287 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table testtb-testConsecutiveExports assigned. 2024-12-02T15:22:50,287 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testConsecutiveExports,, stopping at row=testtb-testConsecutiveExports ,, for max=2147483647 with caching=100 2024-12-02T15:22:50,290 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } 2024-12-02T15:22:50,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733152970290 (current time:1733152970290). 2024-12-02T15:22:50,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-02T15:22:50,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testConsecutiveExports VERSION not specified, setting to 2 2024-12-02T15:22:50,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-02T15:22:50,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@146efa41, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T15:22:50,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] client.ClusterIdFetcher(90): Going to request be0458f36726,34441,-1 for getting cluster id 2024-12-02T15:22:50,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-02T15:22:50,292 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '8e50677f-f1c7-447a-8806-ffbc6b162076' 2024-12-02T15:22:50,292 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-02T15:22:50,292 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "8e50677f-f1c7-447a-8806-ffbc6b162076" 2024-12-02T15:22:50,292 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@65c82b9a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T15:22:50,292 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [be0458f36726,34441,-1] 2024-12-02T15:22:50,292 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-02T15:22:50,293 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T15:22:50,293 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40380, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-02T15:22:50,294 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5bcb2557, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T15:22:50,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-02T15:22:50,295 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=be0458f36726,45405,1733152773787, seqNum=-1] 2024-12-02T15:22:50,295 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T15:22:50,296 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40452, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T15:22:50,297 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441. 
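
[Editor's note] The MasterRpcServices entries above show the master validating a snapshot request of the form { ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 }. The sketch below shows the corresponding client-side Admin call for a FLUSH snapshot of an online table; it is illustrative only, and the connection setup is an assumption.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class TakeSnapshotSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Snapshot and table names are taken from the log entry above; for an enabled
      // table this request is carried out as a FLUSH-type snapshot.
      admin.snapshot("emptySnaptb0-testConsecutiveExports",
          TableName.valueOf("testtb-testConsecutiveExports"));
    }
  }
}
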
2024-12-02T15:22:50,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-02T15:22:50,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T15:22:50,297 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-02T15:22:50,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T15:22:50,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7c16c9c9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T15:22:50,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] client.ClusterIdFetcher(90): Going to request be0458f36726,34441,-1 for getting cluster id 2024-12-02T15:22:50,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-02T15:22:50,299 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '8e50677f-f1c7-447a-8806-ffbc6b162076' 2024-12-02T15:22:50,299 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-02T15:22:50,299 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "8e50677f-f1c7-447a-8806-ffbc6b162076" 2024-12-02T15:22:50,299 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@a8b4811, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T15:22:50,299 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [be0458f36726,34441,-1] 2024-12-02T15:22:50,299 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-02T15:22:50,299 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T15:22:50,300 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40398, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-02T15:22:50,301 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1e1040cd, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T15:22:50,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-02T15:22:50,302 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=be0458f36726,45405,1733152773787, seqNum=-1] 2024-12-02T15:22:50,302 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T15:22:50,303 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40456, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T15:22:50,304 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testConsecutiveExports', locateType=CURRENT is [region=hbase:acl,,1733152776657.82c95700d9ff2efea2507bd84223c4b0., hostname=be0458f36726,43025,1733152773677, seqNum=2] 2024-12-02T15:22:50,305 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T15:22:50,305 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39558, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T15:22:50,306 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441. 
2024-12-02T15:22:50,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor255.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-02T15:22:50,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T15:22:50,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T15:22:50,307 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-02T15:22:50,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] access.PermissionStorage(613): Read acl: entry[testtb-testConsecutiveExports], kv [jenkins: RWXCA] 2024-12-02T15:22:50,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
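The records above trace the master-side validation of the emptySnaptb0-testConsecutiveExports request: creation time, owner and version are filled in, the table ACL (jenkins: RWXCA) is read back from hbase:acl, and only then does SnapshotManager queue a SnapshotProcedure. For reference, a minimal client-side sketch of the call that produces such a request is shown below; it is not taken from this log and assumes the standard HBase Admin API with a default Configuration.

```java
// Illustrative sketch only: requesting a FLUSH-type snapshot like the one logged above.
// Snapshot and table names come from the log; everything else is assumed.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotDescription;
import org.apache.hadoop.hbase.client.SnapshotType;

public class SnapshotRequestSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("testtb-testConsecutiveExports");
      // type=FLUSH matches the "type=FLUSH ttl=0" shown in the MasterRpcServices record.
      admin.snapshot(new SnapshotDescription(
          "emptySnaptb0-testConsecutiveExports", table, SnapshotType.FLUSH));
    }
  }
}
```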
2024-12-02T15:22:50,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] procedure2.ProcedureExecutor(1139): Stored pid=119, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=119, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } 2024-12-02T15:22:50,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 }, snapshot procedure id = 119 2024-12-02T15:22:50,309 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=119, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=119, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-02T15:22:50,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=119 2024-12-02T15:22:50,310 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=119, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=119, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-02T15:22:50,312 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=119, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=119, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-02T15:22:50,318 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742071_1247 (size=161) 2024-12-02T15:22:50,318 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742071_1247 (size=161) 2024-12-02T15:22:50,318 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742071_1247 (size=161) 2024-12-02T15:22:50,319 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=119, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=119, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-02T15:22:50,319 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=120, ppid=119, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 6ee1e20048f6403ae2d1a265da61a290}, {pid=121, ppid=119, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 1a2e837a85dcfb13b32ecdc2cd231ae0}] 2024-12-02T15:22:50,320 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=121, ppid=119, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 1a2e837a85dcfb13b32ecdc2cd231ae0 2024-12-02T15:22:50,320 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=120, ppid=119, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 6ee1e20048f6403ae2d1a265da61a290 2024-12-02T15:22:50,413 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=119 2024-12-02T15:22:50,472 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43025 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=120 2024-12-02T15:22:50,472 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35155 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=121 2024-12-02T15:22:50,472 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=120}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testConsecutiveExports,,1733152969649.6ee1e20048f6403ae2d1a265da61a290. 2024-12-02T15:22:50,472 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=121}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testConsecutiveExports,1,1733152969649.1a2e837a85dcfb13b32ecdc2cd231ae0. 2024-12-02T15:22:50,473 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=120}] regionserver.HRegion(2603): Flush status journal for 6ee1e20048f6403ae2d1a265da61a290: 2024-12-02T15:22:50,473 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=121}] regionserver.HRegion(2603): Flush status journal for 1a2e837a85dcfb13b32ecdc2cd231ae0: 2024-12-02T15:22:50,473 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=120}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testConsecutiveExports,,1733152969649.6ee1e20048f6403ae2d1a265da61a290. for emptySnaptb0-testConsecutiveExports completed. 2024-12-02T15:22:50,473 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=121}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testConsecutiveExports,1,1733152969649.1a2e837a85dcfb13b32ecdc2cd231ae0. for emptySnaptb0-testConsecutiveExports completed. 2024-12-02T15:22:50,473 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=121}] snapshot.SnapshotManifest(241): Storing 'testtb-testConsecutiveExports,1,1733152969649.1a2e837a85dcfb13b32ecdc2cd231ae0.' region-info for snapshot=emptySnaptb0-testConsecutiveExports 2024-12-02T15:22:50,473 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=120}] snapshot.SnapshotManifest(241): Storing 'testtb-testConsecutiveExports,,1733152969649.6ee1e20048f6403ae2d1a265da61a290.' 
region-info for snapshot=emptySnaptb0-testConsecutiveExports 2024-12-02T15:22:50,473 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=121}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-02T15:22:50,473 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=120}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-02T15:22:50,473 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=121}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-02T15:22:50,473 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=120}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-02T15:22:50,492 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742072_1248 (size=68) 2024-12-02T15:22:50,492 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742072_1248 (size=68) 2024-12-02T15:22:50,493 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742073_1249 (size=68) 2024-12-02T15:22:50,493 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742072_1248 (size=68) 2024-12-02T15:22:50,493 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742073_1249 (size=68) 2024-12-02T15:22:50,494 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742073_1249 (size=68) 2024-12-02T15:22:50,494 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=120}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testConsecutiveExports,,1733152969649.6ee1e20048f6403ae2d1a265da61a290. 
2024-12-02T15:22:50,494 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=120}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=120 2024-12-02T15:22:50,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.HMaster(4169): Remote procedure done, pid=120 2024-12-02T15:22:50,494 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testConsecutiveExports on region 6ee1e20048f6403ae2d1a265da61a290 2024-12-02T15:22:50,495 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=120, ppid=119, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 6ee1e20048f6403ae2d1a265da61a290 2024-12-02T15:22:50,497 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=120, ppid=119, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 6ee1e20048f6403ae2d1a265da61a290 in 177 msec 2024-12-02T15:22:50,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=119 2024-12-02T15:22:50,894 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=121}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testConsecutiveExports,1,1733152969649.1a2e837a85dcfb13b32ecdc2cd231ae0. 2024-12-02T15:22:50,894 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=121 2024-12-02T15:22:50,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.HMaster(4169): Remote procedure done, pid=121 2024-12-02T15:22:50,895 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testConsecutiveExports on region 1a2e837a85dcfb13b32ecdc2cd231ae0 2024-12-02T15:22:50,895 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=121, ppid=119, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 1a2e837a85dcfb13b32ecdc2cd231ae0 2024-12-02T15:22:50,898 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=121, resume processing ppid=119 2024-12-02T15:22:50,898 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=121, ppid=119, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 1a2e837a85dcfb13b32ecdc2cd231ae0 in 577 msec 2024-12-02T15:22:50,898 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=119, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=119, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-02T15:22:50,898 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=119, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=119, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-02T15:22:50,899 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=119, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=119, snapshot={ 
ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-02T15:22:50,899 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testConsecutiveExports 2024-12-02T15:22:50,900 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/.hbase-snapshot/.tmp/emptySnaptb0-testConsecutiveExports 2024-12-02T15:22:50,912 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742074_1250 (size=543) 2024-12-02T15:22:50,912 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742074_1250 (size=543) 2024-12-02T15:22:50,912 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742074_1250 (size=543) 2024-12-02T15:22:50,915 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=119, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=119, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-02T15:22:50,920 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=119, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=119, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-02T15:22:50,921 DEBUG [PEWorker-1 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/.hbase-snapshot/.tmp/emptySnaptb0-testConsecutiveExports to hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/.hbase-snapshot/emptySnaptb0-testConsecutiveExports 2024-12-02T15:22:50,922 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=119, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=119, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-02T15:22:50,922 DEBUG [PEWorker-1 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 }, snapshot procedure id = 119 2024-12-02T15:22:50,924 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=119, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=119, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } in 615 msec 2024-12-02T15:22:50,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=119 2024-12-02T15:22:50,933 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testConsecutiveExports completed 2024-12-02T15:22:50,937 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] 
client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testConsecutiveExports', row='01c08e5406a7700c90baccf3ac50473f1', locateType=CURRENT is [region=testtb-testConsecutiveExports,,1733152969649.6ee1e20048f6403ae2d1a265da61a290., hostname=be0458f36726,43025,1733152773677, seqNum=2] 2024-12-02T15:22:50,938 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testConsecutiveExports', row='16712ef540e518f9dcdd9c8fa548ef8b7', locateType=CURRENT is [region=testtb-testConsecutiveExports,1,1733152969649.1a2e837a85dcfb13b32ecdc2cd231ae0., hostname=be0458f36726,35155,1733152773848, seqNum=2] 2024-12-02T15:22:50,939 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testConsecutiveExports', row='25ae8218cd69e380497b26d3ff6829839', locateType=CURRENT is [region=testtb-testConsecutiveExports,1,1733152969649.1a2e837a85dcfb13b32ecdc2cd231ae0., hostname=be0458f36726,35155,1733152773848, seqNum=2] 2024-12-02T15:22:50,940 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testConsecutiveExports', row='314488c4c15cdedaefe894bdac114162c', locateType=CURRENT is [region=testtb-testConsecutiveExports,1,1733152969649.1a2e837a85dcfb13b32ecdc2cd231ae0., hostname=be0458f36726,35155,1733152773848, seqNum=2] 2024-12-02T15:22:50,940 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testConsecutiveExports', row='47e28ae88cb5f880697cd625159f9d212', locateType=CURRENT is [region=testtb-testConsecutiveExports,1,1733152969649.1a2e837a85dcfb13b32ecdc2cd231ae0., hostname=be0458f36726,35155,1733152773848, seqNum=2] 2024-12-02T15:22:50,941 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testConsecutiveExports', row='588a7a15f8edf33ea7e88c46b6ff1fce3', locateType=CURRENT is [region=testtb-testConsecutiveExports,1,1733152969649.1a2e837a85dcfb13b32ecdc2cd231ae0., hostname=be0458f36726,35155,1733152773848, seqNum=2] 2024-12-02T15:22:50,943 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43025 {}] regionserver.HRegion(8528): writing data to region testtb-testConsecutiveExports,,1733152969649.6ee1e20048f6403ae2d1a265da61a290. with WAL disabled. Data may be lost in the event of a crash. 2024-12-02T15:22:50,944 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35155 {}] regionserver.HRegion(8528): writing data to region testtb-testConsecutiveExports,1,1733152969649.1a2e837a85dcfb13b32ecdc2cd231ae0. with WAL disabled. Data may be lost in the event of a crash. 2024-12-02T15:22:50,945 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testConsecutiveExports,, stopping at row=testtb-testConsecutiveExports ,, for max=2147483647 with caching=100 2024-12-02T15:22:50,948 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testConsecutiveExports 2024-12-02T15:22:50,948 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testConsecutiveExports,,1733152969649.6ee1e20048f6403ae2d1a265da61a290. 
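The AsyncNonMetaRegionLocator lookups and the two "writing data to region ... with WAL disabled" records above correspond to the test loading rows into both regions before the snaptb0 snapshot is taken. Below is a hedged sketch of such a load: it sets SKIP_WAL durability on each Put, which is one way to produce that warning. The table name and family "cf" are taken from the log; the row keys and values are invented for illustration.

```java
// Illustrative sketch only: loading a few rows with the WAL skipped.
// Family "cf" and qualifier "q" match the flush records further down; keys/values are made up.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class LoadRowsSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("testtb-testConsecutiveExports"))) {
      for (int i = 0; i < 50; i++) {
        Put put = new Put(Bytes.toBytes(String.format("row-%03d", i)));
        put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value-" + i));
        // Skipping the WAL triggers the "Data may be lost in the event of a crash" warning.
        put.setDurability(Durability.SKIP_WAL);
        table.put(put);
      }
    }
  }
}
```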
2024-12-02T15:22:50,948 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-02T15:22:50,950 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testConsecutiveExports,, stopping at row=testtb-testConsecutiveExports ,, for max=2147483647 with caching=100 2024-12-02T15:22:50,955 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testConsecutiveExports,, stopping at row=testtb-testConsecutiveExports ,, for max=2147483647 with caching=100 2024-12-02T15:22:50,961 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testConsecutiveExports,, stopping at row=testtb-testConsecutiveExports ,, for max=2147483647 with caching=100 2024-12-02T15:22:50,963 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } 2024-12-02T15:22:50,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733152970963 (current time:1733152970963). 2024-12-02T15:22:50,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-02T15:22:50,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testConsecutiveExports VERSION not specified, setting to 2 2024-12-02T15:22:50,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-02T15:22:50,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6fbae458, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T15:22:50,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] client.ClusterIdFetcher(90): Going to request be0458f36726,34441,-1 for getting cluster id 2024-12-02T15:22:50,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-02T15:22:50,965 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '8e50677f-f1c7-447a-8806-ffbc6b162076' 2024-12-02T15:22:50,965 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-02T15:22:50,965 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "8e50677f-f1c7-447a-8806-ffbc6b162076" 2024-12-02T15:22:50,965 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4117bff7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 
2024-12-02T15:22:50,965 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [be0458f36726,34441,-1] 2024-12-02T15:22:50,965 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-02T15:22:50,966 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T15:22:50,966 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40416, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-02T15:22:50,967 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1c06bf91, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T15:22:50,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-02T15:22:50,968 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=be0458f36726,45405,1733152773787, seqNum=-1] 2024-12-02T15:22:50,968 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T15:22:50,969 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40460, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T15:22:50,970 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441. 
2024-12-02T15:22:50,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-02T15:22:50,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T15:22:50,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T15:22:50,970 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-02T15:22:50,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7f77fe1d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T15:22:50,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] client.ClusterIdFetcher(90): Going to request be0458f36726,34441,-1 for getting cluster id 2024-12-02T15:22:50,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-02T15:22:50,971 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '8e50677f-f1c7-447a-8806-ffbc6b162076' 2024-12-02T15:22:50,972 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-02T15:22:50,972 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "8e50677f-f1c7-447a-8806-ffbc6b162076" 2024-12-02T15:22:50,972 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1615061f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T15:22:50,972 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [be0458f36726,34441,-1] 2024-12-02T15:22:50,972 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-02T15:22:50,972 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T15:22:50,973 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40432, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-02T15:22:50,973 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@713167f5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T15:22:50,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-02T15:22:50,974 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=be0458f36726,45405,1733152773787, seqNum=-1] 2024-12-02T15:22:50,975 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T15:22:50,975 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40474, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T15:22:50,977 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testConsecutiveExports', locateType=CURRENT is [region=hbase:acl,,1733152776657.82c95700d9ff2efea2507bd84223c4b0., hostname=be0458f36726,43025,1733152773677, seqNum=2] 2024-12-02T15:22:50,977 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T15:22:50,978 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39568, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T15:22:50,979 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441. 
2024-12-02T15:22:50,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor255.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-02T15:22:50,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T15:22:50,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T15:22:50,979 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-02T15:22:50,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] access.PermissionStorage(613): Read acl: entry[testtb-testConsecutiveExports], kv [jenkins: RWXCA] 2024-12-02T15:22:50,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
2024-12-02T15:22:50,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] procedure2.ProcedureExecutor(1139): Stored pid=122, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } 2024-12-02T15:22:50,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 }, snapshot procedure id = 122 2024-12-02T15:22:50,982 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-02T15:22:50,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=122 2024-12-02T15:22:50,983 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-02T15:22:50,986 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-02T15:22:50,991 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742075_1251 (size=156) 2024-12-02T15:22:50,992 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742075_1251 (size=156) 2024-12-02T15:22:50,992 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742075_1251 (size=156) 2024-12-02T15:22:50,993 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-02T15:22:50,993 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=123, ppid=122, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 6ee1e20048f6403ae2d1a265da61a290}, {pid=124, ppid=122, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 1a2e837a85dcfb13b32ecdc2cd231ae0}] 2024-12-02T15:22:50,994 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=124, ppid=122, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 1a2e837a85dcfb13b32ecdc2cd231ae0 2024-12-02T15:22:50,995 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=123, ppid=122, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 6ee1e20048f6403ae2d1a265da61a290 2024-12-02T15:22:51,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 
{}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=122 2024-12-02T15:22:51,146 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35155 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=124 2024-12-02T15:22:51,146 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43025 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=123 2024-12-02T15:22:51,146 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testConsecutiveExports,1,1733152969649.1a2e837a85dcfb13b32ecdc2cd231ae0. 2024-12-02T15:22:51,146 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testConsecutiveExports,,1733152969649.6ee1e20048f6403ae2d1a265da61a290. 2024-12-02T15:22:51,147 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.HRegion(2902): Flushing 6ee1e20048f6403ae2d1a265da61a290 1/1 column families, dataSize=65 B heapSize=400 B 2024-12-02T15:22:51,147 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] regionserver.HRegion(2902): Flushing 1a2e837a85dcfb13b32ecdc2cd231ae0 1/1 column families, dataSize=3.19 KB heapSize=7.14 KB 2024-12-02T15:22:51,171 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testConsecutiveExports/6ee1e20048f6403ae2d1a265da61a290/.tmp/cf/cdc5144365f44db7adaa4aa6d1c66555 is 69, key is 01c08e5406a7700c90baccf3ac50473f1/cf:q/1733152970942/Put/seqid=0 2024-12-02T15:22:51,172 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testConsecutiveExports/1a2e837a85dcfb13b32ecdc2cd231ae0/.tmp/cf/2108e06785cf491d9ada00a2c4d37a0f is 71, key is 116e633ffe877c9dfc9c4b465691bc66/cf:q/1733152970944/Put/seqid=0 2024-12-02T15:22:51,178 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742076_1252 (size=5149) 2024-12-02T15:22:51,178 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742076_1252 (size=5149) 2024-12-02T15:22:51,178 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742076_1252 (size=5149) 2024-12-02T15:22:51,179 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testConsecutiveExports/6ee1e20048f6403ae2d1a265da61a290/.tmp/cf/cdc5144365f44db7adaa4aa6d1c66555 2024-12-02T15:22:51,184 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742077_1253 (size=8460) 2024-12-02T15:22:51,184 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742077_1253 (size=8460) 2024-12-02T15:22:51,185 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742077_1253 (size=8460) 2024-12-02T15:22:51,185 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.19 KB at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testConsecutiveExports/1a2e837a85dcfb13b32ecdc2cd231ae0/.tmp/cf/2108e06785cf491d9ada00a2c4d37a0f 2024-12-02T15:22:51,185 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testConsecutiveExports/6ee1e20048f6403ae2d1a265da61a290/.tmp/cf/cdc5144365f44db7adaa4aa6d1c66555 as hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testConsecutiveExports/6ee1e20048f6403ae2d1a265da61a290/cf/cdc5144365f44db7adaa4aa6d1c66555 2024-12-02T15:22:51,189 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testConsecutiveExports/6ee1e20048f6403ae2d1a265da61a290/cf/cdc5144365f44db7adaa4aa6d1c66555, entries=1, sequenceid=6, filesize=5.0 K 2024-12-02T15:22:51,190 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testConsecutiveExports/1a2e837a85dcfb13b32ecdc2cd231ae0/.tmp/cf/2108e06785cf491d9ada00a2c4d37a0f as hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testConsecutiveExports/1a2e837a85dcfb13b32ecdc2cd231ae0/cf/2108e06785cf491d9ada00a2c4d37a0f 2024-12-02T15:22:51,190 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.HRegion(3140): Finished flush of dataSize ~65 B/65, heapSize ~384 B/384, currentSize=0 B/0 for 6ee1e20048f6403ae2d1a265da61a290 in 44ms, sequenceid=6, compaction requested=false 2024-12-02T15:22:51,190 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testConsecutiveExports' 2024-12-02T15:22:51,191 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.HRegion(2603): Flush status journal for 6ee1e20048f6403ae2d1a265da61a290: 2024-12-02T15:22:51,191 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testConsecutiveExports,,1733152969649.6ee1e20048f6403ae2d1a265da61a290. 
for snaptb0-testConsecutiveExports completed. 2024-12-02T15:22:51,191 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] snapshot.SnapshotManifest(241): Storing 'testtb-testConsecutiveExports,,1733152969649.6ee1e20048f6403ae2d1a265da61a290.' region-info for snapshot=snaptb0-testConsecutiveExports 2024-12-02T15:22:51,191 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-02T15:22:51,191 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testConsecutiveExports/6ee1e20048f6403ae2d1a265da61a290/cf/cdc5144365f44db7adaa4aa6d1c66555] hfiles 2024-12-02T15:22:51,191 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testConsecutiveExports/6ee1e20048f6403ae2d1a265da61a290/cf/cdc5144365f44db7adaa4aa6d1c66555 for snapshot=snaptb0-testConsecutiveExports 2024-12-02T15:22:51,195 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testConsecutiveExports/1a2e837a85dcfb13b32ecdc2cd231ae0/cf/2108e06785cf491d9ada00a2c4d37a0f, entries=49, sequenceid=6, filesize=8.3 K 2024-12-02T15:22:51,196 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] regionserver.HRegion(3140): Finished flush of dataSize ~3.19 KB/3271, heapSize ~7.13 KB/7296, currentSize=0 B/0 for 1a2e837a85dcfb13b32ecdc2cd231ae0 in 50ms, sequenceid=6, compaction requested=false 2024-12-02T15:22:51,196 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] regionserver.HRegion(2603): Flush status journal for 1a2e837a85dcfb13b32ecdc2cd231ae0: 2024-12-02T15:22:51,196 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testConsecutiveExports,1,1733152969649.1a2e837a85dcfb13b32ecdc2cd231ae0. for snaptb0-testConsecutiveExports completed. 2024-12-02T15:22:51,196 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] snapshot.SnapshotManifest(241): Storing 'testtb-testConsecutiveExports,1,1733152969649.1a2e837a85dcfb13b32ecdc2cd231ae0.' 
region-info for snapshot=snaptb0-testConsecutiveExports 2024-12-02T15:22:51,196 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-02T15:22:51,196 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testConsecutiveExports/1a2e837a85dcfb13b32ecdc2cd231ae0/cf/2108e06785cf491d9ada00a2c4d37a0f] hfiles 2024-12-02T15:22:51,196 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testConsecutiveExports/1a2e837a85dcfb13b32ecdc2cd231ae0/cf/2108e06785cf491d9ada00a2c4d37a0f for snapshot=snaptb0-testConsecutiveExports 2024-12-02T15:22:51,200 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742078_1254 (size=107) 2024-12-02T15:22:51,200 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742078_1254 (size=107) 2024-12-02T15:22:51,200 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742078_1254 (size=107) 2024-12-02T15:22:51,201 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testConsecutiveExports,,1733152969649.6ee1e20048f6403ae2d1a265da61a290. 
2024-12-02T15:22:51,201 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=123 2024-12-02T15:22:51,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.HMaster(4169): Remote procedure done, pid=123 2024-12-02T15:22:51,202 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testConsecutiveExports on region 6ee1e20048f6403ae2d1a265da61a290 2024-12-02T15:22:51,202 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=123, ppid=122, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 6ee1e20048f6403ae2d1a265da61a290 2024-12-02T15:22:51,203 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=123, ppid=122, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 6ee1e20048f6403ae2d1a265da61a290 in 209 msec 2024-12-02T15:22:51,210 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742079_1255 (size=107) 2024-12-02T15:22:51,210 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742079_1255 (size=107) 2024-12-02T15:22:51,210 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742079_1255 (size=107) 2024-12-02T15:22:51,210 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testConsecutiveExports,1,1733152969649.1a2e837a85dcfb13b32ecdc2cd231ae0. 
2024-12-02T15:22:51,210 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=124 2024-12-02T15:22:51,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.HMaster(4169): Remote procedure done, pid=124 2024-12-02T15:22:51,211 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testConsecutiveExports on region 1a2e837a85dcfb13b32ecdc2cd231ae0 2024-12-02T15:22:51,211 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=124, ppid=122, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 1a2e837a85dcfb13b32ecdc2cd231ae0 2024-12-02T15:22:51,213 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=124, resume processing ppid=122 2024-12-02T15:22:51,213 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-02T15:22:51,213 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=124, ppid=122, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 1a2e837a85dcfb13b32ecdc2cd231ae0 in 219 msec 2024-12-02T15:22:51,214 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-02T15:22:51,214 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-02T15:22:51,214 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testConsecutiveExports 2024-12-02T15:22:51,215 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/.hbase-snapshot/.tmp/snaptb0-testConsecutiveExports 2024-12-02T15:22:51,223 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742080_1256 (size=621) 2024-12-02T15:22:51,223 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742080_1256 (size=621) 2024-12-02T15:22:51,223 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742080_1256 (size=621) 2024-12-02T15:22:51,232 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-02T15:22:51,237 INFO [PEWorker-4 {}] 
procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-02T15:22:51,238 DEBUG [PEWorker-4 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/.hbase-snapshot/.tmp/snaptb0-testConsecutiveExports to hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/.hbase-snapshot/snaptb0-testConsecutiveExports 2024-12-02T15:22:51,239 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-02T15:22:51,239 DEBUG [PEWorker-4 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 }, snapshot procedure id = 122 2024-12-02T15:22:51,240 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=122, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } in 259 msec 2024-12-02T15:22:51,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=122 2024-12-02T15:22:51,304 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testConsecutiveExports completed 2024-12-02T15:22:51,304 INFO [Time-limited test {}] snapshot.TestExportSnapshot(523): Local export destination path: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/44e720fa-d04f-d823-6564-55fab94a4e60/local-export-1733152971304 2024-12-02T15:22:51,304 INFO [Time-limited test {}] snapshot.TestExportSnapshot(542): tgtFsUri=file:///, tgtDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/44e720fa-d04f-d823-6564-55fab94a4e60/local-export-1733152971304, rawTgtDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/44e720fa-d04f-d823-6564-55fab94a4e60/local-export-1733152971304, srcFsUri=hdfs://localhost:36329, srcDir=hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657 2024-12-02T15:22:51,336 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1094): inputFs=hdfs://localhost:36329, inputRoot=hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657 2024-12-02T15:22:51,336 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1095): outputFs=org.apache.hadoop.fs.LocalFileSystem@53ba3ed2, outputRoot=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/44e720fa-d04f-d823-6564-55fab94a4e60/local-export-1733152971304, skipTmp=false, 
initialOutputSnapshotDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/44e720fa-d04f-d823-6564-55fab94a4e60/local-export-1733152971304/.hbase-snapshot/.tmp/snaptb0-testConsecutiveExports 2024-12-02T15:22:51,337 INFO [Time-limited test {}] snapshot.ExportSnapshot(1104): Verify the source snapshot's expiration status and integrity. 2024-12-02T15:22:51,341 INFO [Time-limited test {}] snapshot.ExportSnapshot(1162): Copy Snapshot Manifest from hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/.hbase-snapshot/snaptb0-testConsecutiveExports to file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/44e720fa-d04f-d823-6564-55fab94a4e60/local-export-1733152971304/.hbase-snapshot/.tmp/snaptb0-testConsecutiveExports 2024-12-02T15:22:51,362 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-common/target/hbase-common-3.0.0-beta-2-SNAPSHOT.jar 2024-12-02T15:22:51,363 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-protocol-shaded/target/hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar 2024-12-02T15:22:51,363 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-client/target/hbase-client-3.0.0-beta-2-SNAPSHOT.jar 2024-12-02T15:22:52,163 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/44e720fa-d04f-d823-6564-55fab94a4e60/hadoop-1900159400420109503.jar 2024-12-02T15:22:52,163 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-hadoop-compat/target/hbase-hadoop-compat-3.0.0-beta-2-SNAPSHOT.jar 2024-12-02T15:22:52,164 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-hadoop-compat/target/hbase-hadoop-compat-3.0.0-beta-2-SNAPSHOT.jar 2024-12-02T15:22:52,223 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/44e720fa-d04f-d823-6564-55fab94a4e60/hadoop-1822747219432072115.jar 2024-12-02T15:22:52,224 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-metrics/target/hbase-metrics-3.0.0-beta-2-SNAPSHOT.jar 2024-12-02T15:22:52,224 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-metrics-api/target/hbase-metrics-api-3.0.0-beta-2-SNAPSHOT.jar 2024-12-02T15:22:52,225 DEBUG [Time-limited test {}] 
mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-replication/target/hbase-replication-3.0.0-beta-2-SNAPSHOT.jar 2024-12-02T15:22:52,225 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-http/target/hbase-http-3.0.0-beta-2-SNAPSHOT.jar 2024-12-02T15:22:52,225 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-procedure/target/hbase-procedure-3.0.0-beta-2-SNAPSHOT.jar 2024-12-02T15:22:52,225 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-zookeeper/target/hbase-zookeeper-3.0.0-beta-2-SNAPSHOT.jar 2024-12-02T15:22:52,226 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-02T15:22:52,226 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-02T15:22:52,226 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-02T15:22:52,227 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-02T15:22:52,227 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-02T15:22:52,227 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-02T15:22:52,227 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-02T15:22:52,228 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-02T15:22:52,228 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-02T15:22:52,228 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-02T15:22:52,228 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-02T15:22:52,229 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-02T15:22:52,229 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-02T15:22:52,229 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-02T15:22:52,230 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-02T15:22:52,230 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-02T15:22:52,230 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-02T15:22:52,230 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-02T15:22:52,292 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742081_1257 (size=24020) 2024-12-02T15:22:52,292 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to 
blk_1073742081_1257 (size=24020) 2024-12-02T15:22:52,292 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742081_1257 (size=24020) 2024-12-02T15:22:52,303 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742082_1258 (size=77755) 2024-12-02T15:22:52,303 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742082_1258 (size=77755) 2024-12-02T15:22:52,304 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742082_1258 (size=77755) 2024-12-02T15:22:52,312 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742083_1259 (size=131360) 2024-12-02T15:22:52,312 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742083_1259 (size=131360) 2024-12-02T15:22:52,313 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742083_1259 (size=131360) 2024-12-02T15:22:52,320 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742084_1260 (size=111793) 2024-12-02T15:22:52,320 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742084_1260 (size=111793) 2024-12-02T15:22:52,320 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742084_1260 (size=111793) 2024-12-02T15:22:52,332 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742085_1261 (size=1832290) 2024-12-02T15:22:52,332 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742085_1261 (size=1832290) 2024-12-02T15:22:52,332 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742085_1261 (size=1832290) 2024-12-02T15:22:52,355 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742086_1262 (size=8360005) 2024-12-02T15:22:52,355 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742086_1262 (size=8360005) 2024-12-02T15:22:52,355 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742086_1262 (size=8360005) 2024-12-02T15:22:52,363 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742087_1263 (size=503880) 2024-12-02T15:22:52,363 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742087_1263 (size=503880) 2024-12-02T15:22:52,363 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742087_1263 (size=503880) 2024-12-02T15:22:52,371 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is 
added to blk_1073742088_1264 (size=322274) 2024-12-02T15:22:52,371 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742088_1264 (size=322274) 2024-12-02T15:22:52,371 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742088_1264 (size=322274) 2024-12-02T15:22:52,377 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742089_1265 (size=20406) 2024-12-02T15:22:52,378 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742089_1265 (size=20406) 2024-12-02T15:22:52,378 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742089_1265 (size=20406) 2024-12-02T15:22:52,384 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742090_1266 (size=45609) 2024-12-02T15:22:52,384 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742090_1266 (size=45609) 2024-12-02T15:22:52,384 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742090_1266 (size=45609) 2024-12-02T15:22:52,392 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742091_1267 (size=136454) 2024-12-02T15:22:52,392 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742091_1267 (size=136454) 2024-12-02T15:22:52,392 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742091_1267 (size=136454) 2024-12-02T15:22:52,402 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742092_1268 (size=1597136) 2024-12-02T15:22:52,402 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742092_1268 (size=1597136) 2024-12-02T15:22:52,403 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742092_1268 (size=1597136) 2024-12-02T15:22:52,412 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742093_1269 (size=30873) 2024-12-02T15:22:52,413 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742093_1269 (size=30873) 2024-12-02T15:22:52,413 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742093_1269 (size=30873) 2024-12-02T15:22:52,419 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742094_1270 (size=29229) 2024-12-02T15:22:52,419 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742094_1270 (size=29229) 2024-12-02T15:22:52,419 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is 
added to blk_1073742094_1270 (size=29229) 2024-12-02T15:22:52,433 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742095_1271 (size=443171) 2024-12-02T15:22:52,434 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742095_1271 (size=443171) 2024-12-02T15:22:52,434 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742095_1271 (size=443171) 2024-12-02T15:22:52,444 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742096_1272 (size=903849) 2024-12-02T15:22:52,444 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742096_1272 (size=903849) 2024-12-02T15:22:52,445 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742096_1272 (size=903849) 2024-12-02T15:22:52,467 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742097_1273 (size=6424745) 2024-12-02T15:22:52,467 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742097_1273 (size=6424745) 2024-12-02T15:22:52,468 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742097_1273 (size=6424745) 2024-12-02T15:22:52,486 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742098_1274 (size=5175431) 2024-12-02T15:22:52,486 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742098_1274 (size=5175431) 2024-12-02T15:22:52,486 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742098_1274 (size=5175431) 2024-12-02T15:22:52,494 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742099_1275 (size=232881) 2024-12-02T15:22:52,494 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742099_1275 (size=232881) 2024-12-02T15:22:52,495 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742099_1275 (size=232881) 2024-12-02T15:22:52,507 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742100_1276 (size=1323991) 2024-12-02T15:22:52,507 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742100_1276 (size=1323991) 2024-12-02T15:22:52,507 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742100_1276 (size=1323991) 2024-12-02T15:22:52,535 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742101_1277 (size=4695811) 2024-12-02T15:22:52,535 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:32893 is added to blk_1073742101_1277 (size=4695811) 2024-12-02T15:22:52,536 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742101_1277 (size=4695811) 2024-12-02T15:22:52,548 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742102_1278 (size=1877034) 2024-12-02T15:22:52,548 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742102_1278 (size=1877034) 2024-12-02T15:22:52,549 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742102_1278 (size=1877034) 2024-12-02T15:22:52,555 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742103_1279 (size=217555) 2024-12-02T15:22:52,556 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742103_1279 (size=217555) 2024-12-02T15:22:52,556 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742103_1279 (size=217555) 2024-12-02T15:22:52,573 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742104_1280 (size=4188619) 2024-12-02T15:22:52,573 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742104_1280 (size=4188619) 2024-12-02T15:22:52,573 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742104_1280 (size=4188619) 2024-12-02T15:22:52,580 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742105_1281 (size=127628) 2024-12-02T15:22:52,580 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742105_1281 (size=127628) 2024-12-02T15:22:52,580 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742105_1281 (size=127628) 2024-12-02T15:22:52,581 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 
2024-12-02T15:22:52,583 INFO [Time-limited test {}] snapshot.ExportSnapshot(663): Loading Snapshot 'snaptb0-testConsecutiveExports' hfile list 2024-12-02T15:22:52,585 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=0 size=8.3 K 2024-12-02T15:22:52,585 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=1 size=5.0 K 2024-12-02T15:22:52,592 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742106_1282 (size=441) 2024-12-02T15:22:52,592 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742106_1282 (size=441) 2024-12-02T15:22:52,592 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742106_1282 (size=441) 2024-12-02T15:22:52,598 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742107_1283 (size=21) 2024-12-02T15:22:52,598 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742107_1283 (size=21) 2024-12-02T15:22:52,598 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742107_1283 (size=21) 2024-12-02T15:22:52,625 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742108_1284 (size=304128) 2024-12-02T15:22:52,626 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742108_1284 (size=304128) 2024-12-02T15:22:52,626 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742108_1284 (size=304128) 2024-12-02T15:22:53,093 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testConsecutiveExports 2024-12-02T15:22:53,093 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testConsecutiveExports Metrics about Tables on a single HBase RegionServer 2024-12-02T15:22:53,094 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemState 2024-12-02T15:22:53,915 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-02T15:22:53,915 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-12-02T15:22:53,917 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733152780087_0004_000001 (auth:SIMPLE) from 127.0.0.1:33954 2024-12-02T15:22:53,932 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_126270/yarn-6841854118/MiniMRCluster_126270-localDir-nm-1_3/usercache/jenkins/appcache/application_1733152780087_0004/container_1733152780087_0004_01_000001/launch_container.sh] 2024-12-02T15:22:53,932 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_126270/yarn-6841854118/MiniMRCluster_126270-localDir-nm-1_3/usercache/jenkins/appcache/application_1733152780087_0004/container_1733152780087_0004_01_000001/container_tokens] 2024-12-02T15:22:53,932 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_126270/yarn-6841854118/MiniMRCluster_126270-localDir-nm-1_3/usercache/jenkins/appcache/application_1733152780087_0004/container_1733152780087_0004_01_000001/sysfs] 2024-12-02T15:22:54,833 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733152780087_0005_000001 (auth:SIMPLE) from 127.0.0.1:49402 2024-12-02T15:22:54,853 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-02T15:22:59,554 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733152780087_0005_000001 (auth:SIMPLE) from 127.0.0.1:50528 2024-12-02T15:22:59,945 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742109_1285 (size=349826) 2024-12-02T15:22:59,945 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742109_1285 (size=349826) 2024-12-02T15:22:59,945 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742109_1285 (size=349826) 2024-12-02T15:23:01,776 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-12-02T15:23:02,143 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733152780087_0005_000001 (auth:SIMPLE) from 127.0.0.1:49406 2024-12-02T15:23:02,145 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733152780087_0005_000001 (auth:SIMPLE) from 127.0.0.1:33964 2024-12-02T15:23:07,900 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_126270/yarn-6841854118/MiniMRCluster_126270-localDir-nm-0_3/usercache/jenkins/appcache/application_1733152780087_0005/container_1733152780087_0005_01_000002/launch_container.sh] 2024-12-02T15:23:07,900 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_126270/yarn-6841854118/MiniMRCluster_126270-localDir-nm-0_3/usercache/jenkins/appcache/application_1733152780087_0005/container_1733152780087_0005_01_000002/container_tokens] 2024-12-02T15:23:07,901 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_126270/yarn-6841854118/MiniMRCluster_126270-localDir-nm-0_3/usercache/jenkins/appcache/application_1733152780087_0005/container_1733152780087_0005_01_000002/sysfs] 2024-12-02T15:23:08,755 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742110_1286 (size=22235) 2024-12-02T15:23:08,755 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742110_1286 (size=22235) 2024-12-02T15:23:08,755 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742110_1286 (size=22235) 2024-12-02T15:23:08,770 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742111_1287 (size=463) 2024-12-02T15:23:08,770 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742111_1287 (size=463) 2024-12-02T15:23:08,771 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742111_1287 (size=463) 2024-12-02T15:23:08,800 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742112_1288 (size=22235) 2024-12-02T15:23:08,800 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742112_1288 (size=22235) 2024-12-02T15:23:08,800 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742112_1288 (size=22235) 2024-12-02T15:23:08,838 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742113_1289 (size=349826) 2024-12-02T15:23:08,838 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742113_1289 (size=349826) 
2024-12-02T15:23:08,838 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742113_1289 (size=349826) 2024-12-02T15:23:08,851 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733152780087_0005_000001 (auth:SIMPLE) from 127.0.0.1:56598 2024-12-02T15:23:08,862 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(360): Exit code from container container_1733152780087_0005_01_000003 is : 143 2024-12-02T15:23:08,870 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_126270/yarn-6841854118/MiniMRCluster_126270-localDir-nm-1_2/usercache/jenkins/appcache/application_1733152780087_0005/container_1733152780087_0005_01_000003/launch_container.sh] 2024-12-02T15:23:08,870 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_126270/yarn-6841854118/MiniMRCluster_126270-localDir-nm-1_2/usercache/jenkins/appcache/application_1733152780087_0005/container_1733152780087_0005_01_000003/container_tokens] 2024-12-02T15:23:08,870 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_126270/yarn-6841854118/MiniMRCluster_126270-localDir-nm-1_2/usercache/jenkins/appcache/application_1733152780087_0005/container_1733152780087_0005_01_000003/sysfs] 2024-12-02T15:23:10,048 INFO [Time-limited test {}] snapshot.ExportSnapshot(1219): Finalize the Snapshot Export 2024-12-02T15:23:10,049 INFO [Time-limited test {}] snapshot.ExportSnapshot(1230): Verify the exported snapshot's expiration status and integrity. 
2024-12-02T15:23:10,050 INFO [Time-limited test {}] snapshot.ExportSnapshot(1236): Export Completed: snaptb0-testConsecutiveExports 2024-12-02T15:23:10,051 INFO [Time-limited test {}] snapshot.TestExportSnapshot(409): Exported snapshot 2024-12-02T15:23:10,051 INFO [Time-limited test {}] snapshot.TestExportSnapshot(420): Verified filesystem state 2024-12-02T15:23:10,051 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-205930424_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/.hbase-snapshot/snaptb0-testConsecutiveExports at hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/.hbase-snapshot/snaptb0-testConsecutiveExports 2024-12-02T15:23:10,051 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/.hbase-snapshot/snaptb0-testConsecutiveExports/.snapshotinfo 2024-12-02T15:23:10,051 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/.hbase-snapshot/snaptb0-testConsecutiveExports/data.manifest 2024-12-02T15:23:10,051 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in org.apache.hadoop.fs.LocalFileSystem@53ba3ed2 in root file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/44e720fa-d04f-d823-6564-55fab94a4e60/local-export-1733152971304/.hbase-snapshot/snaptb0-testConsecutiveExports at file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/44e720fa-d04f-d823-6564-55fab94a4e60/local-export-1733152971304/.hbase-snapshot/snaptb0-testConsecutiveExports 2024-12-02T15:23:10,052 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/44e720fa-d04f-d823-6564-55fab94a4e60/local-export-1733152971304/.hbase-snapshot/snaptb0-testConsecutiveExports/.snapshotinfo 2024-12-02T15:23:10,052 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/44e720fa-d04f-d823-6564-55fab94a4e60/local-export-1733152971304/.hbase-snapshot/snaptb0-testConsecutiveExports/data.manifest 2024-12-02T15:23:10,053 INFO [Time-limited test {}] snapshot.TestExportSnapshot(542): tgtFsUri=file:///, tgtDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/44e720fa-d04f-d823-6564-55fab94a4e60/local-export-1733152971304, rawTgtDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/44e720fa-d04f-d823-6564-55fab94a4e60/local-export-1733152971304, srcFsUri=hdfs://localhost:36329, srcDir=hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657 2024-12-02T15:23:10,077 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1094): inputFs=hdfs://localhost:36329, inputRoot=hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657 2024-12-02T15:23:10,077 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1095): outputFs=org.apache.hadoop.fs.LocalFileSystem@53ba3ed2, 
outputRoot=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/44e720fa-d04f-d823-6564-55fab94a4e60/local-export-1733152971304, skipTmp=false, initialOutputSnapshotDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/44e720fa-d04f-d823-6564-55fab94a4e60/local-export-1733152971304/.hbase-snapshot/.tmp/snaptb0-testConsecutiveExports 2024-12-02T15:23:10,079 INFO [Time-limited test {}] snapshot.ExportSnapshot(1104): Verify the source snapshot's expiration status and integrity. 2024-12-02T15:23:10,083 INFO [Time-limited test {}] snapshot.ExportSnapshot(1162): Copy Snapshot Manifest from hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/.hbase-snapshot/snaptb0-testConsecutiveExports to file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/44e720fa-d04f-d823-6564-55fab94a4e60/local-export-1733152971304/.hbase-snapshot/.tmp/snaptb0-testConsecutiveExports 2024-12-02T15:23:10,119 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-common/target/hbase-common-3.0.0-beta-2-SNAPSHOT.jar 2024-12-02T15:23:10,119 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-protocol-shaded/target/hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar 2024-12-02T15:23:10,119 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-client/target/hbase-client-3.0.0-beta-2-SNAPSHOT.jar 2024-12-02T15:23:10,889 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/44e720fa-d04f-d823-6564-55fab94a4e60/hadoop-14418450413254579614.jar 2024-12-02T15:23:10,890 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-hadoop-compat/target/hbase-hadoop-compat-3.0.0-beta-2-SNAPSHOT.jar 2024-12-02T15:23:10,890 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-hadoop-compat/target/hbase-hadoop-compat-3.0.0-beta-2-SNAPSHOT.jar 2024-12-02T15:23:10,941 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/44e720fa-d04f-d823-6564-55fab94a4e60/hadoop-11710362411148489906.jar 2024-12-02T15:23:10,941 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-metrics/target/hbase-metrics-3.0.0-beta-2-SNAPSHOT.jar 2024-12-02T15:23:10,942 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-metrics-api/target/hbase-metrics-api-3.0.0-beta-2-SNAPSHOT.jar 2024-12-02T15:23:10,942 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-replication/target/hbase-replication-3.0.0-beta-2-SNAPSHOT.jar 2024-12-02T15:23:10,942 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-http/target/hbase-http-3.0.0-beta-2-SNAPSHOT.jar 2024-12-02T15:23:10,943 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-procedure/target/hbase-procedure-3.0.0-beta-2-SNAPSHOT.jar 2024-12-02T15:23:10,943 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-zookeeper/target/hbase-zookeeper-3.0.0-beta-2-SNAPSHOT.jar 2024-12-02T15:23:10,943 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-02T15:23:10,943 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-02T15:23:10,943 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-02T15:23:10,944 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-02T15:23:10,944 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-02T15:23:10,944 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-02T15:23:10,944 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-02T15:23:10,944 DEBUG [Time-limited test {}] 
mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-02T15:23:10,944 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-02T15:23:10,945 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-02T15:23:10,945 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-02T15:23:10,945 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-02T15:23:10,945 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-02T15:23:10,945 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-02T15:23:10,945 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-02T15:23:10,946 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-02T15:23:10,946 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-02T15:23:10,946 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-02T15:23:10,989 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742114_1290 (size=24020) 2024-12-02T15:23:10,989 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742114_1290 (size=24020) 2024-12-02T15:23:10,989 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742114_1290 (size=24020) 2024-12-02T15:23:10,996 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742115_1291 (size=77755) 2024-12-02T15:23:10,996 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742115_1291 (size=77755) 2024-12-02T15:23:10,996 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742115_1291 (size=77755) 2024-12-02T15:23:11,003 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742116_1292 (size=131360) 2024-12-02T15:23:11,003 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742116_1292 (size=131360) 2024-12-02T15:23:11,003 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742116_1292 (size=131360) 2024-12-02T15:23:11,011 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742117_1293 (size=111793) 2024-12-02T15:23:11,011 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742117_1293 (size=111793) 2024-12-02T15:23:11,011 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742117_1293 (size=111793) 2024-12-02T15:23:11,024 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742118_1294 (size=1832290) 2024-12-02T15:23:11,024 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742118_1294 (size=1832290) 2024-12-02T15:23:11,024 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742118_1294 (size=1832290) 2024-12-02T15:23:11,045 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742119_1295 (size=8360005) 2024-12-02T15:23:11,045 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742119_1295 (size=8360005) 2024-12-02T15:23:11,045 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742119_1295 (size=8360005) 2024-12-02T15:23:11,057 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742120_1296 (size=503880) 2024-12-02T15:23:11,057 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742120_1296 (size=503880) 2024-12-02T15:23:11,057 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742120_1296 (size=503880) 2024-12-02T15:23:11,064 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742121_1297 (size=322274) 2024-12-02T15:23:11,064 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742121_1297 (size=322274) 2024-12-02T15:23:11,064 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742121_1297 (size=322274) 2024-12-02T15:23:11,071 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742122_1298 (size=20406) 2024-12-02T15:23:11,071 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742122_1298 (size=20406) 2024-12-02T15:23:11,071 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742122_1298 (size=20406) 2024-12-02T15:23:11,078 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742123_1299 (size=45609) 2024-12-02T15:23:11,078 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742123_1299 (size=45609) 2024-12-02T15:23:11,079 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742123_1299 (size=45609) 2024-12-02T15:23:11,086 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742124_1300 (size=136454) 2024-12-02T15:23:11,086 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742124_1300 (size=136454) 2024-12-02T15:23:11,086 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742124_1300 (size=136454) 2024-12-02T15:23:11,096 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742125_1301 (size=1597136) 2024-12-02T15:23:11,096 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742125_1301 (size=1597136) 2024-12-02T15:23:11,096 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742125_1301 (size=1597136) 2024-12-02T15:23:11,113 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742126_1302 (size=6424745) 2024-12-02T15:23:11,113 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742126_1302 (size=6424745) 2024-12-02T15:23:11,114 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742126_1302 (size=6424745) 2024-12-02T15:23:11,119 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742127_1303 (size=30873) 2024-12-02T15:23:11,119 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742127_1303 (size=30873) 2024-12-02T15:23:11,120 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742127_1303 (size=30873) 2024-12-02T15:23:11,127 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742128_1304 (size=29229) 2024-12-02T15:23:11,127 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742128_1304 (size=29229) 2024-12-02T15:23:11,127 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742128_1304 (size=29229) 2024-12-02T15:23:11,134 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742129_1305 (size=443171) 2024-12-02T15:23:11,134 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742129_1305 (size=443171) 2024-12-02T15:23:11,134 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742129_1305 (size=443171) 2024-12-02T15:23:11,143 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742130_1306 (size=903849) 2024-12-02T15:23:11,143 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742130_1306 (size=903849) 2024-12-02T15:23:11,143 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742130_1306 (size=903849) 2024-12-02T15:23:11,158 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742131_1307 (size=5175431) 2024-12-02T15:23:11,158 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742131_1307 (size=5175431) 2024-12-02T15:23:11,159 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742131_1307 (size=5175431) 2024-12-02T15:23:11,166 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742132_1308 (size=232881) 2024-12-02T15:23:11,166 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742132_1308 (size=232881) 2024-12-02T15:23:11,166 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742132_1308 (size=232881) 2024-12-02T15:23:11,174 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742133_1309 (size=1323991) 2024-12-02T15:23:11,174 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742133_1309 (size=1323991) 2024-12-02T15:23:11,174 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742133_1309 (size=1323991) 2024-12-02T15:23:11,192 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742134_1310 (size=4695811) 2024-12-02T15:23:11,192 
INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742134_1310 (size=4695811) 2024-12-02T15:23:11,192 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742134_1310 (size=4695811) 2024-12-02T15:23:11,201 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742135_1311 (size=1877034) 2024-12-02T15:23:11,201 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742135_1311 (size=1877034) 2024-12-02T15:23:11,201 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742135_1311 (size=1877034) 2024-12-02T15:23:11,207 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742136_1312 (size=217555) 2024-12-02T15:23:11,207 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742136_1312 (size=217555) 2024-12-02T15:23:11,207 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742136_1312 (size=217555) 2024-12-02T15:23:11,220 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742137_1313 (size=4188619) 2024-12-02T15:23:11,220 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742137_1313 (size=4188619) 2024-12-02T15:23:11,221 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742137_1313 (size=4188619) 2024-12-02T15:23:11,226 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742138_1314 (size=127628) 2024-12-02T15:23:11,226 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742138_1314 (size=127628) 2024-12-02T15:23:11,226 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742138_1314 (size=127628) 2024-12-02T15:23:11,227 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 
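The TableMapReduceUtil DEBUG entries above show addDependencyJars resolving, for each referenced class (HBase modules, shaded third-party libraries, and the job's key/value/input/output classes), the jar that contains it so the jar can be shipped with the MapReduce job; the JobResourceUploader warning that follows means no job jar was set for the user classes themselves, which Job#setJarByClass (or Job#setJar) would normally supply. A minimal driver sketch that does both; the class and job names here are illustrative, not taken from the test:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.mapreduce.Job;

public class ExportDriver {
  public static Job createJob(Configuration conf) throws Exception {
    Job job = Job.getInstance(HBaseConfiguration.create(conf), "snapshot-export");
    // Sets the job jar from the jar containing this driver class, which avoids the
    // "No job jar file set. User classes may not be found." warning from JobResourceUploader.
    job.setJarByClass(ExportDriver.class);
    // Resolves the containing jar for each dependency class and adds it to the
    // distributed cache (the "For class X, using jar Y" DEBUG lines above).
    TableMapReduceUtil.addDependencyJars(job);
    return job;
  }
}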
2024-12-02T15:23:11,229 INFO [Time-limited test {}] snapshot.ExportSnapshot(663): Loading Snapshot 'snaptb0-testConsecutiveExports' hfile list 2024-12-02T15:23:11,231 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=0 size=8.3 K 2024-12-02T15:23:11,231 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=1 size=5.0 K 2024-12-02T15:23:11,236 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742139_1315 (size=441) 2024-12-02T15:23:11,236 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742139_1315 (size=441) 2024-12-02T15:23:11,236 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742139_1315 (size=441) 2024-12-02T15:23:11,242 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742140_1316 (size=21) 2024-12-02T15:23:11,242 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742140_1316 (size=21) 2024-12-02T15:23:11,242 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742140_1316 (size=21) 2024-12-02T15:23:11,252 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742141_1317 (size=304132) 2024-12-02T15:23:11,252 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742141_1317 (size=304132) 2024-12-02T15:23:11,252 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742141_1317 (size=304132) 2024-12-02T15:23:15,090 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-02T15:23:15,091 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-12-02T15:23:15,096 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733152780087_0005_000001 (auth:SIMPLE) from 127.0.0.1:35646 2024-12-02T15:23:15,107 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_126270/yarn-6841854118/MiniMRCluster_126270-localDir-nm-0_0/usercache/jenkins/appcache/application_1733152780087_0005/container_1733152780087_0005_01_000001/launch_container.sh] 2024-12-02T15:23:15,107 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_126270/yarn-6841854118/MiniMRCluster_126270-localDir-nm-0_0/usercache/jenkins/appcache/application_1733152780087_0005/container_1733152780087_0005_01_000001/container_tokens] 2024-12-02T15:23:15,107 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_126270/yarn-6841854118/MiniMRCluster_126270-localDir-nm-0_0/usercache/jenkins/appcache/application_1733152780087_0005/container_1733152780087_0005_01_000001/sysfs] 2024-12-02T15:23:15,874 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733152780087_0006_000001 (auth:SIMPLE) from 127.0.0.1:41072 2024-12-02T15:23:20,824 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733152780087_0006_000001 (auth:SIMPLE) from 127.0.0.1:34562 2024-12-02T15:23:21,205 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742142_1318 (size=349830) 2024-12-02T15:23:21,205 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742142_1318 (size=349830) 2024-12-02T15:23:21,206 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742142_1318 (size=349830) 2024-12-02T15:23:23,390 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733152780087_0006_000001 (auth:SIMPLE) from 127.0.0.1:35660 2024-12-02T15:23:23,392 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733152780087_0006_000001 (auth:SIMPLE) from 127.0.0.1:41084 2024-12-02T15:23:28,554 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_126270/yarn-6841854118/MiniMRCluster_126270-localDir-nm-1_3/usercache/jenkins/appcache/application_1733152780087_0006/container_1733152780087_0006_01_000003/launch_container.sh] 2024-12-02T15:23:28,555 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_126270/yarn-6841854118/MiniMRCluster_126270-localDir-nm-1_3/usercache/jenkins/appcache/application_1733152780087_0006/container_1733152780087_0006_01_000003/container_tokens] 2024-12-02T15:23:28,555 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_126270/yarn-6841854118/MiniMRCluster_126270-localDir-nm-1_3/usercache/jenkins/appcache/application_1733152780087_0006/container_1733152780087_0006_01_000003/sysfs] 2024-12-02T15:23:29,561 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742143_1319 (size=21189) 2024-12-02T15:23:29,561 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742143_1319 (size=21189) 2024-12-02T15:23:29,561 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742143_1319 (size=21189) 2024-12-02T15:23:29,571 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742144_1320 (size=463) 2024-12-02T15:23:29,571 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742144_1320 (size=463) 2024-12-02T15:23:29,571 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742144_1320 (size=463) 2024-12-02T15:23:29,629 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742145_1321 (size=21189) 2024-12-02T15:23:29,629 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742145_1321 (size=21189) 2024-12-02T15:23:29,629 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742145_1321 (size=21189) 2024-12-02T15:23:29,645 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742146_1322 (size=349830) 2024-12-02T15:23:29,645 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742146_1322 (size=349830) 2024-12-02T15:23:29,645 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742146_1322 (size=349830) 2024-12-02T15:23:29,668 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_126270/yarn-6841854118/MiniMRCluster_126270-localDir-nm-0_2/usercache/jenkins/appcache/application_1733152780087_0006/container_1733152780087_0006_01_000002/launch_container.sh] 2024-12-02T15:23:29,669 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_126270/yarn-6841854118/MiniMRCluster_126270-localDir-nm-0_2/usercache/jenkins/appcache/application_1733152780087_0006/container_1733152780087_0006_01_000002/container_tokens] 2024-12-02T15:23:29,669 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_126270/yarn-6841854118/MiniMRCluster_126270-localDir-nm-0_2/usercache/jenkins/appcache/application_1733152780087_0006/container_1733152780087_0006_01_000002/sysfs] 2024-12-02T15:23:29,691 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733152780087_0006_000001 (auth:SIMPLE) from 127.0.0.1:52672 2024-12-02T15:23:31,677 INFO [Time-limited test {}] snapshot.ExportSnapshot(1219): Finalize the Snapshot Export 2024-12-02T15:23:31,677 INFO [Time-limited test {}] snapshot.ExportSnapshot(1230): Verify the exported snapshot's expiration status and integrity. 2024-12-02T15:23:31,680 INFO [Time-limited test {}] snapshot.ExportSnapshot(1236): Export Completed: snaptb0-testConsecutiveExports 2024-12-02T15:23:31,680 INFO [Time-limited test {}] snapshot.TestExportSnapshot(409): Exported snapshot 2024-12-02T15:23:31,680 INFO [Time-limited test {}] snapshot.TestExportSnapshot(420): Verified filesystem state 2024-12-02T15:23:31,680 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-205930424_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/.hbase-snapshot/snaptb0-testConsecutiveExports at hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/.hbase-snapshot/snaptb0-testConsecutiveExports 2024-12-02T15:23:31,682 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/.hbase-snapshot/snaptb0-testConsecutiveExports/.snapshotinfo 2024-12-02T15:23:31,682 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/.hbase-snapshot/snaptb0-testConsecutiveExports/data.manifest 2024-12-02T15:23:31,682 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in org.apache.hadoop.fs.LocalFileSystem@53ba3ed2 in root file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/44e720fa-d04f-d823-6564-55fab94a4e60/local-export-1733152971304/.hbase-snapshot/snaptb0-testConsecutiveExports at file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/44e720fa-d04f-d823-6564-55fab94a4e60/local-export-1733152971304/.hbase-snapshot/snaptb0-testConsecutiveExports 2024-12-02T15:23:31,682 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/44e720fa-d04f-d823-6564-55fab94a4e60/local-export-1733152971304/.hbase-snapshot/snaptb0-testConsecutiveExports/.snapshotinfo 2024-12-02T15:23:31,682 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): 
file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/44e720fa-d04f-d823-6564-55fab94a4e60/local-export-1733152971304/.hbase-snapshot/snaptb0-testConsecutiveExports/data.manifest 2024-12-02T15:23:31,694 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testConsecutiveExports 2024-12-02T15:23:31,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] procedure2.ProcedureExecutor(1139): Stored pid=125, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testConsecutiveExports 2024-12-02T15:23:31,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=125 2024-12-02T15:23:31,697 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testConsecutiveExports","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733153011697"}]},"ts":"1733153011697"} 2024-12-02T15:23:31,699 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testConsecutiveExports, state=DISABLING in hbase:meta 2024-12-02T15:23:31,699 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(284): Set testtb-testConsecutiveExports to state=DISABLING 2024-12-02T15:23:31,700 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=126, ppid=125, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testConsecutiveExports}] 2024-12-02T15:23:31,701 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=127, ppid=126, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=6ee1e20048f6403ae2d1a265da61a290, UNASSIGN}, {pid=128, ppid=126, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=1a2e837a85dcfb13b32ecdc2cd231ae0, UNASSIGN}] 2024-12-02T15:23:31,702 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=128, ppid=126, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=1a2e837a85dcfb13b32ecdc2cd231ae0, UNASSIGN 2024-12-02T15:23:31,702 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=127, ppid=126, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=6ee1e20048f6403ae2d1a265da61a290, UNASSIGN 2024-12-02T15:23:31,702 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=128 updating hbase:meta row=1a2e837a85dcfb13b32ecdc2cd231ae0, regionState=CLOSING, regionLocation=be0458f36726,35155,1733152773848 2024-12-02T15:23:31,702 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=127 updating hbase:meta row=6ee1e20048f6403ae2d1a265da61a290, regionState=CLOSING, regionLocation=be0458f36726,43025,1733152773677 2024-12-02T15:23:31,704 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=127, ppid=126, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=6ee1e20048f6403ae2d1a265da61a290, UNASSIGN because future has completed 2024-12-02T15:23:31,704 DEBUG 
[PEWorker-4 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-02T15:23:31,704 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=129, ppid=127, state=RUNNABLE, hasLock=false; CloseRegionProcedure 6ee1e20048f6403ae2d1a265da61a290, server=be0458f36726,43025,1733152773677}] 2024-12-02T15:23:31,705 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=128, ppid=126, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=1a2e837a85dcfb13b32ecdc2cd231ae0, UNASSIGN because future has completed 2024-12-02T15:23:31,705 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-02T15:23:31,705 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=130, ppid=128, state=RUNNABLE, hasLock=false; CloseRegionProcedure 1a2e837a85dcfb13b32ecdc2cd231ae0, server=be0458f36726,35155,1733152773848}] 2024-12-02T15:23:31,777 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-02T15:23:31,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=125 2024-12-02T15:23:31,856 INFO [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] handler.UnassignRegionHandler(122): Close 6ee1e20048f6403ae2d1a265da61a290 2024-12-02T15:23:31,857 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-02T15:23:31,857 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegion(1722): Closing 6ee1e20048f6403ae2d1a265da61a290, disabling compactions & flushes 2024-12-02T15:23:31,857 INFO [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegion(1755): Closing region testtb-testConsecutiveExports,,1733152969649.6ee1e20048f6403ae2d1a265da61a290. 2024-12-02T15:23:31,857 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testConsecutiveExports,,1733152969649.6ee1e20048f6403ae2d1a265da61a290. 2024-12-02T15:23:31,857 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegion(1843): Acquired close lock on testtb-testConsecutiveExports,,1733152969649.6ee1e20048f6403ae2d1a265da61a290. after waiting 0 ms 2024-12-02T15:23:31,857 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegion(1853): Updates disabled for region testtb-testConsecutiveExports,,1733152969649.6ee1e20048f6403ae2d1a265da61a290. 
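The export sequence logged a few entries above (loading the 'snaptb0-testConsecutiveExports' hfile list, computing two splits, then "Finalize the Snapshot Export" and "Export Completed") is produced by the ExportSnapshot tool. A hedged sketch of driving the same tool programmatically follows; the destination URI is illustrative, and the option spelling (-snapshot/-copy-to versus --snapshot/--copy-to) varies slightly between HBase versions:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public class RunExport {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Roughly equivalent to the command line:
    //   hbase org.apache.hadoop.hbase.snapshot.ExportSnapshot \
    //     -snapshot snaptb0-testConsecutiveExports -copy-to file:///tmp/local-export
    int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
        "-snapshot", "snaptb0-testConsecutiveExports",
        "-copy-to", "file:///tmp/local-export"   // illustrative destination, not the test's path
    });
    System.exit(rc);
  }
}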
2024-12-02T15:23:31,857 INFO [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=130}] handler.UnassignRegionHandler(122): Close 1a2e837a85dcfb13b32ecdc2cd231ae0 2024-12-02T15:23:31,857 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=130}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-02T15:23:31,857 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=130}] regionserver.HRegion(1722): Closing 1a2e837a85dcfb13b32ecdc2cd231ae0, disabling compactions & flushes 2024-12-02T15:23:31,857 INFO [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=130}] regionserver.HRegion(1755): Closing region testtb-testConsecutiveExports,1,1733152969649.1a2e837a85dcfb13b32ecdc2cd231ae0. 2024-12-02T15:23:31,857 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=130}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testConsecutiveExports,1,1733152969649.1a2e837a85dcfb13b32ecdc2cd231ae0. 2024-12-02T15:23:31,857 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=130}] regionserver.HRegion(1843): Acquired close lock on testtb-testConsecutiveExports,1,1733152969649.1a2e837a85dcfb13b32ecdc2cd231ae0. after waiting 0 ms 2024-12-02T15:23:31,857 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=130}] regionserver.HRegion(1853): Updates disabled for region testtb-testConsecutiveExports,1,1733152969649.1a2e837a85dcfb13b32ecdc2cd231ae0. 2024-12-02T15:23:31,861 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=130}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testConsecutiveExports/1a2e837a85dcfb13b32ecdc2cd231ae0/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-02T15:23:31,861 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testConsecutiveExports/6ee1e20048f6403ae2d1a265da61a290/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-02T15:23:31,862 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=130}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-02T15:23:31,862 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-02T15:23:31,862 INFO [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegion(1973): Closed testtb-testConsecutiveExports,,1733152969649.6ee1e20048f6403ae2d1a265da61a290. 2024-12-02T15:23:31,862 INFO [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=130}] regionserver.HRegion(1973): Closed testtb-testConsecutiveExports,1,1733152969649.1a2e837a85dcfb13b32ecdc2cd231ae0. 
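The DisableTableProcedure chain above (pid=125 with its UNASSIGN and CloseRegionProcedure children, each region writing its recovered.edits seqid before closing) is what the master runs when a client disables the table. The test itself goes through the asynchronous admin (RawAsyncHBaseAdmin in the log), but the plain synchronous Admin call triggers the same procedure; a minimal sketch with connection setup shown for completeness:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DisableTable {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Blocks until the DisableTableProcedure and its region-close
      // subprocedures finish on the master.
      admin.disableTable(TableName.valueOf("testtb-testConsecutiveExports"));
    }
  }
}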
2024-12-02T15:23:31,862 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegion(1676): Region close journal for 6ee1e20048f6403ae2d1a265da61a290: Waiting for close lock at 1733153011857Running coprocessor pre-close hooks at 1733153011857Disabling compacts and flushes for region at 1733153011857Disabling writes for close at 1733153011857Writing region close event to WAL at 1733153011858 (+1 ms)Running coprocessor post-close hooks at 1733153011862 (+4 ms)Closed at 1733153011862 2024-12-02T15:23:31,862 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=130}] regionserver.HRegion(1676): Region close journal for 1a2e837a85dcfb13b32ecdc2cd231ae0: Waiting for close lock at 1733153011857Running coprocessor pre-close hooks at 1733153011857Disabling compacts and flushes for region at 1733153011857Disabling writes for close at 1733153011857Writing region close event to WAL at 1733153011858 (+1 ms)Running coprocessor post-close hooks at 1733153011862 (+4 ms)Closed at 1733153011862 2024-12-02T15:23:31,864 INFO [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] handler.UnassignRegionHandler(157): Closed 6ee1e20048f6403ae2d1a265da61a290 2024-12-02T15:23:31,865 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=127 updating hbase:meta row=6ee1e20048f6403ae2d1a265da61a290, regionState=CLOSED 2024-12-02T15:23:31,865 INFO [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=130}] handler.UnassignRegionHandler(157): Closed 1a2e837a85dcfb13b32ecdc2cd231ae0 2024-12-02T15:23:31,866 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=128 updating hbase:meta row=1a2e837a85dcfb13b32ecdc2cd231ae0, regionState=CLOSED 2024-12-02T15:23:31,866 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=129, ppid=127, state=RUNNABLE, hasLock=false; CloseRegionProcedure 6ee1e20048f6403ae2d1a265da61a290, server=be0458f36726,43025,1733152773677 because future has completed 2024-12-02T15:23:31,867 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=130, ppid=128, state=RUNNABLE, hasLock=false; CloseRegionProcedure 1a2e837a85dcfb13b32ecdc2cd231ae0, server=be0458f36726,35155,1733152773848 because future has completed 2024-12-02T15:23:31,869 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=129, resume processing ppid=127 2024-12-02T15:23:31,869 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=129, ppid=127, state=SUCCESS, hasLock=false; CloseRegionProcedure 6ee1e20048f6403ae2d1a265da61a290, server=be0458f36726,43025,1733152773677 in 163 msec 2024-12-02T15:23:31,870 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=130, resume processing ppid=128 2024-12-02T15:23:31,870 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=127, ppid=126, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=6ee1e20048f6403ae2d1a265da61a290, UNASSIGN in 168 msec 2024-12-02T15:23:31,870 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=130, ppid=128, state=SUCCESS, hasLock=false; CloseRegionProcedure 1a2e837a85dcfb13b32ecdc2cd231ae0, server=be0458f36726,35155,1733152773848 in 163 msec 2024-12-02T15:23:31,871 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): 
Finished subprocedure pid=128, resume processing ppid=126 2024-12-02T15:23:31,871 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=128, ppid=126, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=1a2e837a85dcfb13b32ecdc2cd231ae0, UNASSIGN in 169 msec 2024-12-02T15:23:31,873 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=126, resume processing ppid=125 2024-12-02T15:23:31,873 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=126, ppid=125, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testConsecutiveExports in 171 msec 2024-12-02T15:23:31,874 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testConsecutiveExports","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733153011873"}]},"ts":"1733153011873"} 2024-12-02T15:23:31,875 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testConsecutiveExports, state=DISABLED in hbase:meta 2024-12-02T15:23:31,875 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(296): Set testtb-testConsecutiveExports to state=DISABLED 2024-12-02T15:23:31,877 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=125, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testConsecutiveExports in 182 msec 2024-12-02T15:23:32,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=125 2024-12-02T15:23:32,014 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testConsecutiveExports completed 2024-12-02T15:23:32,015 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testConsecutiveExports 2024-12-02T15:23:32,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] procedure2.ProcedureExecutor(1139): Stored pid=131, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testConsecutiveExports 2024-12-02T15:23:32,018 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=131, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testConsecutiveExports 2024-12-02T15:23:32,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testConsecutiveExports 2024-12-02T15:23:32,019 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=131, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testConsecutiveExports 2024-12-02T15:23:32,024 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43025 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testConsecutiveExports 2024-12-02T15:23:32,025 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testConsecutiveExports/6ee1e20048f6403ae2d1a265da61a290 2024-12-02T15:23:32,025 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(131): ARCHIVING 
hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testConsecutiveExports/1a2e837a85dcfb13b32ecdc2cd231ae0 2024-12-02T15:23:32,027 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testConsecutiveExports/6ee1e20048f6403ae2d1a265da61a290/cf, FileablePath, hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testConsecutiveExports/6ee1e20048f6403ae2d1a265da61a290/recovered.edits] 2024-12-02T15:23:32,027 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testConsecutiveExports/1a2e837a85dcfb13b32ecdc2cd231ae0/cf, FileablePath, hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testConsecutiveExports/1a2e837a85dcfb13b32ecdc2cd231ae0/recovered.edits] 2024-12-02T15:23:32,031 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testConsecutiveExports/1a2e837a85dcfb13b32ecdc2cd231ae0/cf/2108e06785cf491d9ada00a2c4d37a0f to hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/archive/data/default/testtb-testConsecutiveExports/1a2e837a85dcfb13b32ecdc2cd231ae0/cf/2108e06785cf491d9ada00a2c4d37a0f 2024-12-02T15:23:32,031 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testConsecutiveExports/6ee1e20048f6403ae2d1a265da61a290/cf/cdc5144365f44db7adaa4aa6d1c66555 to hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/archive/data/default/testtb-testConsecutiveExports/6ee1e20048f6403ae2d1a265da61a290/cf/cdc5144365f44db7adaa4aa6d1c66555 2024-12-02T15:23:32,033 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testConsecutiveExports/1a2e837a85dcfb13b32ecdc2cd231ae0/recovered.edits/9.seqid to hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/archive/data/default/testtb-testConsecutiveExports/1a2e837a85dcfb13b32ecdc2cd231ae0/recovered.edits/9.seqid 2024-12-02T15:23:32,034 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testConsecutiveExports/6ee1e20048f6403ae2d1a265da61a290/recovered.edits/9.seqid to hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/archive/data/default/testtb-testConsecutiveExports/6ee1e20048f6403ae2d1a265da61a290/recovered.edits/9.seqid 2024-12-02T15:23:32,034 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testConsecutiveExports/1a2e837a85dcfb13b32ecdc2cd231ae0 2024-12-02T15:23:32,034 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testConsecutiveExports/6ee1e20048f6403ae2d1a265da61a290 
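HFileArchiver moves the region's store files under the cluster's archive directory rather than deleting them outright; the archived paths in the entries above follow archive/data/<namespace>/<table>/<region>/<cf>/. If you need to inspect what was archived, a plain FileSystem listing is enough; the path argument below is illustrative:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ListArchivedStoreFiles {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    // Archive layout, as in the HFileArchiver entries above:
    //   <hbase.rootdir>/archive/data/<namespace>/<table>/<region>/<cf>/<storefile>
    Path cfDir = new Path(args[0]);  // e.g. .../archive/data/default/<table>/<region>/cf
    for (FileStatus st : fs.listStatus(cfDir)) {
      System.out.println(st.getPath() + " (" + st.getLen() + " bytes)");
    }
  }
}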
2024-12-02T15:23:32,034 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(313): Archived testtb-testConsecutiveExports regions 2024-12-02T15:23:32,036 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=131, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testConsecutiveExports 2024-12-02T15:23:32,039 WARN [PEWorker-3 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testConsecutiveExports from hbase:meta 2024-12-02T15:23:32,042 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testConsecutiveExports' descriptor. 2024-12-02T15:23:32,043 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=131, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testConsecutiveExports 2024-12-02T15:23:32,043 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testConsecutiveExports' from region states. 2024-12-02T15:23:32,043 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testConsecutiveExports,,1733152969649.6ee1e20048f6403ae2d1a265da61a290.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733153012043"}]},"ts":"9223372036854775807"} 2024-12-02T15:23:32,043 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testConsecutiveExports,1,1733152969649.1a2e837a85dcfb13b32ecdc2cd231ae0.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733153012043"}]},"ts":"9223372036854775807"} 2024-12-02T15:23:32,045 INFO [PEWorker-3 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-12-02T15:23:32,045 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => 6ee1e20048f6403ae2d1a265da61a290, NAME => 'testtb-testConsecutiveExports,,1733152969649.6ee1e20048f6403ae2d1a265da61a290.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 1a2e837a85dcfb13b32ecdc2cd231ae0, NAME => 'testtb-testConsecutiveExports,1,1733152969649.1a2e837a85dcfb13b32ecdc2cd231ae0.', STARTKEY => '1', ENDKEY => ''}] 2024-12-02T15:23:32,045 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testConsecutiveExports' as deleted. 
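The DeleteTableProcedure above archives the region store files, removes the two regions from hbase:meta, and drops the table descriptor; it is triggered by Admin#deleteTable on an already-disabled table, and the test then also drops its two snapshots, as logged further below. A minimal client-side sketch of those calls (the test itself issues them through its own helpers):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DeleteTableAndSnapshots {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Runs the DeleteTableProcedure shown above; the table must already be disabled.
      admin.deleteTable(TableName.valueOf("testtb-testConsecutiveExports"));
      // Removes the snapshot manifests, matching the SnapshotManager "Deleting snapshot" entries below.
      admin.deleteSnapshot("emptySnaptb0-testConsecutiveExports");
      admin.deleteSnapshot("snaptb0-testConsecutiveExports");
    }
  }
}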
2024-12-02T15:23:32,045 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testConsecutiveExports","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733153012045"}]},"ts":"9223372036854775807"} 2024-12-02T15:23:32,047 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testConsecutiveExports state from META 2024-12-02T15:23:32,047 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(133): Finished pid=131, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testConsecutiveExports 2024-12-02T15:23:32,048 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=131, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testConsecutiveExports in 32 msec 2024-12-02T15:23:32,065 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34441-0x10197ce54a90000, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-12-02T15:23:32,065 DEBUG [pool-69-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45405-0x10197ce54a90002, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-12-02T15:23:32,065 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35155-0x10197ce54a90003, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-12-02T15:23:32,065 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43025-0x10197ce54a90001, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-12-02T15:23:32,066 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF 2024-12-02T15:23:32,066 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF 2024-12-02T15:23:32,074 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43025-0x10197ce54a90001, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-12-02T15:23:32,074 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34441-0x10197ce54a90000, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T15:23:32,074 DEBUG [pool-69-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45405-0x10197ce54a90002, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-12-02T15:23:32,074 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35155-0x10197ce54a90003, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T15:23:32,074 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43025-0x10197ce54a90001, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper 
Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T15:23:32,074 DEBUG [pool-69-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45405-0x10197ce54a90002, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T15:23:32,074 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data null 2024-12-02T15:23:32,074 INFO [zk-permission-watcher-pool-0 {}] access.AuthManager(136): Skipping permission cache refresh because writable data is empty 2024-12-02T15:23:32,075 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data null 2024-12-02T15:23:32,075 INFO [zk-permission-watcher-pool-0 {}] access.AuthManager(136): Skipping permission cache refresh because writable data is empty 2024-12-02T15:23:32,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=131 2024-12-02T15:23:32,076 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testConsecutiveExports 2024-12-02T15:23:32,076 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testConsecutiveExports completed 2024-12-02T15:23:32,083 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testConsecutiveExports" type: DISABLED 2024-12-02T15:23:32,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testConsecutiveExports 2024-12-02T15:23:32,089 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testConsecutiveExports" type: DISABLED 2024-12-02T15:23:32,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testConsecutiveExports 2024-12-02T15:23:32,111 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestSecureExportSnapshot#testConsecutiveExports Thread=804 (was 805), OpenFileDescriptor=802 (was 813), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=770 (was 671) - SystemLoadAverage LEAK? -, ProcessCount=18 (was 17) - ProcessCount LEAK? 
-, AvailableMemoryMB=1468 (was 1806) 2024-12-02T15:23:32,111 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=804 is superior to 500 2024-12-02T15:23:32,126 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestSecureExportSnapshot#testExportFileSystemStateWithMergeRegion Thread=804, OpenFileDescriptor=802, MaxFileDescriptor=1048576, SystemLoadAverage=770, ProcessCount=18, AvailableMemoryMB=1467 2024-12-02T15:23:32,126 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=804 is superior to 500 2024-12-02T15:23:32,128 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testExportFileSystemStateWithMergeRegion', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-02T15:23:32,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] procedure2.ProcedureExecutor(1139): Stored pid=132, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-12-02T15:23:32,129 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=132, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion execute state=CREATE_TABLE_PRE_OPERATION 2024-12-02T15:23:32,130 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T15:23:32,130 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportFileSystemStateWithMergeRegion" procId is: 132 2024-12-02T15:23:32,130 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=132, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-02T15:23:32,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=132 2024-12-02T15:23:32,136 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742147_1323 (size=422) 2024-12-02T15:23:32,136 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742147_1323 (size=422) 2024-12-02T15:23:32,136 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742147_1323 (size=422) 2024-12-02T15:23:32,138 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 1829701d319630807ac0982962b00b1b, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,,1733153012127.1829701d319630807ac0982962b00b1b.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportFileSystemStateWithMergeRegion', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA 
=> {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657 2024-12-02T15:23:32,138 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => ebd17761b06365d94a6efc9193e92c1d, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,1,1733153012127.ebd17761b06365d94a6efc9193e92c1d.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportFileSystemStateWithMergeRegion', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657 2024-12-02T15:23:32,147 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742148_1324 (size=83) 2024-12-02T15:23:32,148 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742148_1324 (size=83) 2024-12-02T15:23:32,148 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742148_1324 (size=83) 2024-12-02T15:23:32,148 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742149_1325 (size=83) 2024-12-02T15:23:32,148 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742149_1325 (size=83) 2024-12-02T15:23:32,148 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742149_1325 (size=83) 2024-12-02T15:23:32,149 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion,,1733153012127.1829701d319630807ac0982962b00b1b.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T15:23:32,149 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion,1,1733153012127.ebd17761b06365d94a6efc9193e92c1d.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T15:23:32,149 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1722): Closing 1829701d319630807ac0982962b00b1b, disabling compactions & flushes 2024-12-02T15:23:32,149 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1722): Closing ebd17761b06365d94a6efc9193e92c1d, 
disabling compactions & flushes 2024-12-02T15:23:32,149 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion,,1733153012127.1829701d319630807ac0982962b00b1b. 2024-12-02T15:23:32,149 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion,,1733153012127.1829701d319630807ac0982962b00b1b. 2024-12-02T15:23:32,149 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion,1,1733153012127.ebd17761b06365d94a6efc9193e92c1d. 2024-12-02T15:23:32,149 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion,,1733153012127.1829701d319630807ac0982962b00b1b. after waiting 0 ms 2024-12-02T15:23:32,149 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion,1,1733153012127.ebd17761b06365d94a6efc9193e92c1d. 2024-12-02T15:23:32,149 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion,,1733153012127.1829701d319630807ac0982962b00b1b. 2024-12-02T15:23:32,149 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion,,1733153012127.1829701d319630807ac0982962b00b1b. 2024-12-02T15:23:32,149 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion,1,1733153012127.ebd17761b06365d94a6efc9193e92c1d. after waiting 0 ms 2024-12-02T15:23:32,149 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion,1,1733153012127.ebd17761b06365d94a6efc9193e92c1d. 2024-12-02T15:23:32,149 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1676): Region close journal for 1829701d319630807ac0982962b00b1b: Waiting for close lock at 1733153012149Disabling compacts and flushes for region at 1733153012149Disabling writes for close at 1733153012149Writing region close event to WAL at 1733153012149Closed at 1733153012149 2024-12-02T15:23:32,149 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion,1,1733153012127.ebd17761b06365d94a6efc9193e92c1d. 
2024-12-02T15:23:32,149 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1676): Region close journal for ebd17761b06365d94a6efc9193e92c1d: Waiting for close lock at 1733153012149Disabling compacts and flushes for region at 1733153012149Disabling writes for close at 1733153012149Writing region close event to WAL at 1733153012149Closed at 1733153012149 2024-12-02T15:23:32,150 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=132, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion execute state=CREATE_TABLE_ADD_TO_META 2024-12-02T15:23:32,150 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithMergeRegion,,1733153012127.1829701d319630807ac0982962b00b1b.","families":{"info":[{"qualifier":"regioninfo","vlen":82,"tag":[],"timestamp":"1733153012150"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733153012150"}]},"ts":"1733153012150"} 2024-12-02T15:23:32,151 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithMergeRegion,1,1733153012127.ebd17761b06365d94a6efc9193e92c1d.","families":{"info":[{"qualifier":"regioninfo","vlen":82,"tag":[],"timestamp":"1733153012150"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733153012150"}]},"ts":"1733153012150"} 2024-12-02T15:23:32,153 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-12-02T15:23:32,154 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=132, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-02T15:23:32,154 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733153012154"}]},"ts":"1733153012154"} 2024-12-02T15:23:32,156 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion, state=ENABLING in hbase:meta 2024-12-02T15:23:32,156 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(204): Hosts are {be0458f36726=0} racks are {/default-rack=0} 2024-12-02T15:23:32,157 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-02T15:23:32,157 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-02T15:23:32,157 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-02T15:23:32,157 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-02T15:23:32,157 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-02T15:23:32,157 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-02T15:23:32,157 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-02T15:23:32,157 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-02T15:23:32,157 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-02T15:23:32,157 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-02T15:23:32,158 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized 
subprocedures=[{pid=133, ppid=132, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=1829701d319630807ac0982962b00b1b, ASSIGN}, {pid=134, ppid=132, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=ebd17761b06365d94a6efc9193e92c1d, ASSIGN}] 2024-12-02T15:23:32,159 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=134, ppid=132, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=ebd17761b06365d94a6efc9193e92c1d, ASSIGN 2024-12-02T15:23:32,159 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=133, ppid=132, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=1829701d319630807ac0982962b00b1b, ASSIGN 2024-12-02T15:23:32,159 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(269): Starting pid=134, ppid=132, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=ebd17761b06365d94a6efc9193e92c1d, ASSIGN; state=OFFLINE, location=be0458f36726,45405,1733152773787; forceNewPlan=false, retain=false 2024-12-02T15:23:32,159 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=133, ppid=132, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=1829701d319630807ac0982962b00b1b, ASSIGN; state=OFFLINE, location=be0458f36726,35155,1733152773848; forceNewPlan=false, retain=false 2024-12-02T15:23:32,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=132 2024-12-02T15:23:32,310 INFO [be0458f36726:34441 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
2024-12-02T15:23:32,310 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=134 updating hbase:meta row=ebd17761b06365d94a6efc9193e92c1d, regionState=OPENING, regionLocation=be0458f36726,45405,1733152773787 2024-12-02T15:23:32,310 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=133 updating hbase:meta row=1829701d319630807ac0982962b00b1b, regionState=OPENING, regionLocation=be0458f36726,35155,1733152773848 2024-12-02T15:23:32,312 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=134, ppid=132, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=ebd17761b06365d94a6efc9193e92c1d, ASSIGN because future has completed 2024-12-02T15:23:32,312 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=135, ppid=134, state=RUNNABLE, hasLock=false; OpenRegionProcedure ebd17761b06365d94a6efc9193e92c1d, server=be0458f36726,45405,1733152773787}] 2024-12-02T15:23:32,312 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=133, ppid=132, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=1829701d319630807ac0982962b00b1b, ASSIGN because future has completed 2024-12-02T15:23:32,313 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=136, ppid=133, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1829701d319630807ac0982962b00b1b, server=be0458f36726,35155,1733152773848}] 2024-12-02T15:23:32,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=132 2024-12-02T15:23:32,467 INFO [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithMergeRegion,1,1733153012127.ebd17761b06365d94a6efc9193e92c1d. 2024-12-02T15:23:32,467 INFO [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithMergeRegion,,1733153012127.1829701d319630807ac0982962b00b1b. 2024-12-02T15:23:32,467 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(7752): Opening region: {ENCODED => 1829701d319630807ac0982962b00b1b, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,,1733153012127.1829701d319630807ac0982962b00b1b.', STARTKEY => '', ENDKEY => '1'} 2024-12-02T15:23:32,467 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(7752): Opening region: {ENCODED => ebd17761b06365d94a6efc9193e92c1d, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,1,1733153012127.ebd17761b06365d94a6efc9193e92c1d.', STARTKEY => '1', ENDKEY => ''} 2024-12-02T15:23:32,468 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithMergeRegion,1,1733153012127.ebd17761b06365d94a6efc9193e92c1d. 
service=AccessControlService 2024-12-02T15:23:32,468 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithMergeRegion,,1733153012127.1829701d319630807ac0982962b00b1b. service=AccessControlService 2024-12-02T15:23:32,468 INFO [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-02T15:23:32,468 INFO [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-02T15:23:32,468 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithMergeRegion ebd17761b06365d94a6efc9193e92c1d 2024-12-02T15:23:32,468 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithMergeRegion 1829701d319630807ac0982962b00b1b 2024-12-02T15:23:32,468 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion,1,1733153012127.ebd17761b06365d94a6efc9193e92c1d.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T15:23:32,468 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion,,1733153012127.1829701d319630807ac0982962b00b1b.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T15:23:32,468 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(7794): checking encryption for ebd17761b06365d94a6efc9193e92c1d 2024-12-02T15:23:32,468 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(7794): checking encryption for 1829701d319630807ac0982962b00b1b 2024-12-02T15:23:32,468 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(7797): checking classloading for ebd17761b06365d94a6efc9193e92c1d 2024-12-02T15:23:32,468 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(7797): checking classloading for 1829701d319630807ac0982962b00b1b 2024-12-02T15:23:32,470 INFO [StoreOpener-1829701d319630807ac0982962b00b1b-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 1829701d319630807ac0982962b00b1b 2024-12-02T15:23:32,470 INFO [StoreOpener-ebd17761b06365d94a6efc9193e92c1d-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, 
cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region ebd17761b06365d94a6efc9193e92c1d 2024-12-02T15:23:32,471 INFO [StoreOpener-1829701d319630807ac0982962b00b1b-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1829701d319630807ac0982962b00b1b columnFamilyName cf 2024-12-02T15:23:32,471 INFO [StoreOpener-ebd17761b06365d94a6efc9193e92c1d-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region ebd17761b06365d94a6efc9193e92c1d columnFamilyName cf 2024-12-02T15:23:32,471 DEBUG [StoreOpener-1829701d319630807ac0982962b00b1b-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T15:23:32,471 DEBUG [StoreOpener-ebd17761b06365d94a6efc9193e92c1d-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T15:23:32,471 INFO [StoreOpener-1829701d319630807ac0982962b00b1b-1 {}] regionserver.HStore(327): Store=1829701d319630807ac0982962b00b1b/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T15:23:32,471 INFO [StoreOpener-ebd17761b06365d94a6efc9193e92c1d-1 {}] regionserver.HStore(327): Store=ebd17761b06365d94a6efc9193e92c1d/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T15:23:32,472 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(1038): replaying wal for 1829701d319630807ac0982962b00b1b 2024-12-02T15:23:32,472 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(1038): replaying wal for ebd17761b06365d94a6efc9193e92c1d 2024-12-02T15:23:32,472 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportFileSystemStateWithMergeRegion/1829701d319630807ac0982962b00b1b 2024-12-02T15:23:32,472 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportFileSystemStateWithMergeRegion/ebd17761b06365d94a6efc9193e92c1d 2024-12-02T15:23:32,472 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportFileSystemStateWithMergeRegion/ebd17761b06365d94a6efc9193e92c1d 2024-12-02T15:23:32,472 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportFileSystemStateWithMergeRegion/1829701d319630807ac0982962b00b1b 2024-12-02T15:23:32,473 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(1048): stopping wal replay for ebd17761b06365d94a6efc9193e92c1d 2024-12-02T15:23:32,473 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(1060): Cleaning up temporary data for ebd17761b06365d94a6efc9193e92c1d 2024-12-02T15:23:32,473 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(1048): stopping wal replay for 1829701d319630807ac0982962b00b1b 2024-12-02T15:23:32,473 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(1060): Cleaning up temporary data for 1829701d319630807ac0982962b00b1b 2024-12-02T15:23:32,474 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(1093): writing seq id for ebd17761b06365d94a6efc9193e92c1d 2024-12-02T15:23:32,474 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(1093): writing seq id for 1829701d319630807ac0982962b00b1b 2024-12-02T15:23:32,483 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportFileSystemStateWithMergeRegion/ebd17761b06365d94a6efc9193e92c1d/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-02T15:23:32,483 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportFileSystemStateWithMergeRegion/1829701d319630807ac0982962b00b1b/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-02T15:23:32,483 INFO [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(1114): Opened ebd17761b06365d94a6efc9193e92c1d; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, 
ConstantSizeRegionSplitPolicy{desiredMaxFileSize=64373241, jitterRate=-0.04076395928859711}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-02T15:23:32,483 INFO [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(1114): Opened 1829701d319630807ac0982962b00b1b; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=60324653, jitterRate=-0.1010926216840744}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-02T15:23:32,483 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1829701d319630807ac0982962b00b1b 2024-12-02T15:23:32,483 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(1122): Running coprocessor post-open hooks for ebd17761b06365d94a6efc9193e92c1d 2024-12-02T15:23:32,484 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(1006): Region open journal for ebd17761b06365d94a6efc9193e92c1d: Running coprocessor pre-open hook at 1733153012468Writing region info on filesystem at 1733153012468Initializing all the Stores at 1733153012469 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733153012469Cleaning up temporary data from old regions at 1733153012473 (+4 ms)Running coprocessor post-open hooks at 1733153012483 (+10 ms)Region opened successfully at 1733153012484 (+1 ms) 2024-12-02T15:23:32,484 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(1006): Region open journal for 1829701d319630807ac0982962b00b1b: Running coprocessor pre-open hook at 1733153012468Writing region info on filesystem at 1733153012468Initializing all the Stores at 1733153012469 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733153012469Cleaning up temporary data from old regions at 1733153012473 (+4 ms)Running coprocessor post-open hooks at 1733153012483 (+10 ms)Region opened successfully at 1733153012484 (+1 ms) 2024-12-02T15:23:32,485 INFO [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemStateWithMergeRegion,,1733153012127.1829701d319630807ac0982962b00b1b., pid=136, masterSystemTime=1733153012464 2024-12-02T15:23:32,485 INFO [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemStateWithMergeRegion,1,1733153012127.ebd17761b06365d94a6efc9193e92c1d., pid=135, masterSystemTime=1733153012463 2024-12-02T15:23:32,486 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] 
regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithMergeRegion,,1733153012127.1829701d319630807ac0982962b00b1b. 2024-12-02T15:23:32,486 INFO [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithMergeRegion,,1733153012127.1829701d319630807ac0982962b00b1b. 2024-12-02T15:23:32,487 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=133 updating hbase:meta row=1829701d319630807ac0982962b00b1b, regionState=OPEN, openSeqNum=2, regionLocation=be0458f36726,35155,1733152773848 2024-12-02T15:23:32,487 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithMergeRegion,1,1733153012127.ebd17761b06365d94a6efc9193e92c1d. 2024-12-02T15:23:32,487 INFO [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithMergeRegion,1,1733153012127.ebd17761b06365d94a6efc9193e92c1d. 2024-12-02T15:23:32,488 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=134 updating hbase:meta row=ebd17761b06365d94a6efc9193e92c1d, regionState=OPEN, openSeqNum=2, regionLocation=be0458f36726,45405,1733152773787 2024-12-02T15:23:32,488 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=136, ppid=133, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1829701d319630807ac0982962b00b1b, server=be0458f36726,35155,1733152773848 because future has completed 2024-12-02T15:23:32,489 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=135, ppid=134, state=RUNNABLE, hasLock=false; OpenRegionProcedure ebd17761b06365d94a6efc9193e92c1d, server=be0458f36726,45405,1733152773787 because future has completed 2024-12-02T15:23:32,491 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=136, resume processing ppid=133 2024-12-02T15:23:32,491 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=136, ppid=133, state=SUCCESS, hasLock=false; OpenRegionProcedure 1829701d319630807ac0982962b00b1b, server=be0458f36726,35155,1733152773848 in 176 msec 2024-12-02T15:23:32,492 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=135, resume processing ppid=134 2024-12-02T15:23:32,492 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=135, ppid=134, state=SUCCESS, hasLock=false; OpenRegionProcedure ebd17761b06365d94a6efc9193e92c1d, server=be0458f36726,45405,1733152773787 in 178 msec 2024-12-02T15:23:32,492 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=133, ppid=132, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=1829701d319630807ac0982962b00b1b, ASSIGN in 333 msec 2024-12-02T15:23:32,493 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=134, resume processing ppid=132 2024-12-02T15:23:32,493 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=134, ppid=132, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=ebd17761b06365d94a6efc9193e92c1d, ASSIGN in 334 msec 2024-12-02T15:23:32,494 INFO 
[PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=132, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-02T15:23:32,494 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733153012494"}]},"ts":"1733153012494"} 2024-12-02T15:23:32,496 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion, state=ENABLED in hbase:meta 2024-12-02T15:23:32,496 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=132, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion execute state=CREATE_TABLE_POST_OPERATION 2024-12-02T15:23:32,497 DEBUG [PEWorker-2 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportFileSystemStateWithMergeRegion jenkins: RWXCA 2024-12-02T15:23:32,500 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43025 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithMergeRegion], kv [jenkins: RWXCA] 2024-12-02T15:23:32,648 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43025-0x10197ce54a90001, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T15:23:32,648 DEBUG [pool-69-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45405-0x10197ce54a90002, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T15:23:32,648 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35155-0x10197ce54a90003, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T15:23:32,648 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34441-0x10197ce54a90000, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T15:23:32,732 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-02T15:23:32,732 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-02T15:23:32,732 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-02T15:23:32,732 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data 
PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-02T15:23:32,734 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=132, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion in 604 msec 2024-12-02T15:23:32,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=132 2024-12-02T15:23:32,753 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion completed 2024-12-02T15:23:32,753 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table testtb-testExportFileSystemStateWithMergeRegion get assigned. Timeout = 60000ms 2024-12-02T15:23:32,753 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-02T15:23:32,756 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table testtb-testExportFileSystemStateWithMergeRegion assigned to meta. Checking AM states. 2024-12-02T15:23:32,756 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-02T15:23:32,756 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table testtb-testExportFileSystemStateWithMergeRegion assigned. 2024-12-02T15:23:32,756 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithMergeRegion,, stopping at row=testtb-testExportFileSystemStateWithMergeRegion ,, for max=2147483647 with caching=100 2024-12-02T15:23:32,759 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } 2024-12-02T15:23:32,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733153012759 (current time:1733153012759). 
2024-12-02T15:23:32,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-02T15:23:32,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testExportFileSystemStateWithMergeRegion VERSION not specified, setting to 2 2024-12-02T15:23:32,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-02T15:23:32,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@642d3105, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T15:23:32,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] client.ClusterIdFetcher(90): Going to request be0458f36726,34441,-1 for getting cluster id 2024-12-02T15:23:32,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-02T15:23:32,761 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '8e50677f-f1c7-447a-8806-ffbc6b162076' 2024-12-02T15:23:32,761 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-02T15:23:32,761 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "8e50677f-f1c7-447a-8806-ffbc6b162076" 2024-12-02T15:23:32,761 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6a7a09b7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T15:23:32,761 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [be0458f36726,34441,-1] 2024-12-02T15:23:32,761 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-02T15:23:32,761 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T15:23:32,762 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34194, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-02T15:23:32,763 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@79d61686, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T15:23:32,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-02T15:23:32,763 DEBUG 
[MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=be0458f36726,45405,1733152773787, seqNum=-1] 2024-12-02T15:23:32,764 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T15:23:32,764 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51538, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T15:23:32,765 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441. 2024-12-02T15:23:32,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-02T15:23:32,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T15:23:32,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T15:23:32,765 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-12-02T15:23:32,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7b9ac752, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T15:23:32,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] client.ClusterIdFetcher(90): Going to request be0458f36726,34441,-1 for getting cluster id 2024-12-02T15:23:32,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-02T15:23:32,767 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '8e50677f-f1c7-447a-8806-ffbc6b162076' 2024-12-02T15:23:32,767 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-02T15:23:32,767 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "8e50677f-f1c7-447a-8806-ffbc6b162076" 2024-12-02T15:23:32,767 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@24d419b8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T15:23:32,767 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [be0458f36726,34441,-1] 2024-12-02T15:23:32,767 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-02T15:23:32,767 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T15:23:32,768 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34216, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-02T15:23:32,768 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@22b3f09a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T15:23:32,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-02T15:23:32,769 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=be0458f36726,45405,1733152773787, seqNum=-1] 2024-12-02T15:23:32,769 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T15:23:32,770 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51544, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
2024-12-02T15:23:32,771 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemStateWithMergeRegion', locateType=CURRENT is [region=hbase:acl,,1733152776657.82c95700d9ff2efea2507bd84223c4b0., hostname=be0458f36726,43025,1733152773677, seqNum=2] 2024-12-02T15:23:32,772 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T15:23:32,772 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34198, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T15:23:32,773 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441. 2024-12-02T15:23:32,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor255.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-02T15:23:32,774 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T15:23:32,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T15:23:32,774 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-02T15:23:32,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithMergeRegion], kv [jenkins: RWXCA] 2024-12-02T15:23:32,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-12-02T15:23:32,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] procedure2.ProcedureExecutor(1139): Stored pid=137, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=137, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } 2024-12-02T15:23:32,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 }, snapshot procedure id = 137 2024-12-02T15:23:32,776 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=137, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=137, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-02T15:23:32,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=137 2024-12-02T15:23:32,777 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=137, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=137, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-02T15:23:32,779 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=137, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=137, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-02T15:23:32,783 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742150_1326 (size=215) 2024-12-02T15:23:32,783 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742150_1326 (size=215) 2024-12-02T15:23:32,783 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742150_1326 (size=215) 2024-12-02T15:23:32,785 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=137, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, 
id=137, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-02T15:23:32,785 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=138, ppid=137, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 1829701d319630807ac0982962b00b1b}, {pid=139, ppid=137, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ebd17761b06365d94a6efc9193e92c1d}] 2024-12-02T15:23:32,785 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=138, ppid=137, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 1829701d319630807ac0982962b00b1b 2024-12-02T15:23:32,785 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=139, ppid=137, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ebd17761b06365d94a6efc9193e92c1d 2024-12-02T15:23:32,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=137 2024-12-02T15:23:32,937 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35155 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=138 2024-12-02T15:23:32,937 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45405 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=139 2024-12-02T15:23:32,937 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=138}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,,1733153012127.1829701d319630807ac0982962b00b1b. 2024-12-02T15:23:32,937 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=139}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,1,1733153012127.ebd17761b06365d94a6efc9193e92c1d. 2024-12-02T15:23:32,938 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=138}] regionserver.HRegion(2603): Flush status journal for 1829701d319630807ac0982962b00b1b: 2024-12-02T15:23:32,938 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=139}] regionserver.HRegion(2603): Flush status journal for ebd17761b06365d94a6efc9193e92c1d: 2024-12-02T15:23:32,938 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=139}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithMergeRegion,1,1733153012127.ebd17761b06365d94a6efc9193e92c1d. for emptySnaptb0-testExportFileSystemStateWithMergeRegion completed. 2024-12-02T15:23:32,938 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=138}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithMergeRegion,,1733153012127.1829701d319630807ac0982962b00b1b. for emptySnaptb0-testExportFileSystemStateWithMergeRegion completed. 
2024-12-02T15:23:32,938 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=139}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithMergeRegion,1,1733153012127.ebd17761b06365d94a6efc9193e92c1d.' region-info for snapshot=emptySnaptb0-testExportFileSystemStateWithMergeRegion 2024-12-02T15:23:32,938 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=138}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithMergeRegion,,1733153012127.1829701d319630807ac0982962b00b1b.' region-info for snapshot=emptySnaptb0-testExportFileSystemStateWithMergeRegion 2024-12-02T15:23:32,938 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=138}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-02T15:23:32,938 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=139}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-02T15:23:32,938 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=138}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-02T15:23:32,938 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=139}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-02T15:23:32,949 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742152_1328 (size=86) 2024-12-02T15:23:32,949 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742151_1327 (size=86) 2024-12-02T15:23:32,949 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742152_1328 (size=86) 2024-12-02T15:23:32,949 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742152_1328 (size=86) 2024-12-02T15:23:32,950 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742151_1327 (size=86) 2024-12-02T15:23:32,950 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=139}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,1,1733153012127.ebd17761b06365d94a6efc9193e92c1d. 2024-12-02T15:23:32,950 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=138}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,,1733153012127.1829701d319630807ac0982962b00b1b. 
2024-12-02T15:23:32,950 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=139 2024-12-02T15:23:32,950 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=138}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=138 2024-12-02T15:23:32,950 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742151_1327 (size=86) 2024-12-02T15:23:32,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.HMaster(4169): Remote procedure done, pid=139 2024-12-02T15:23:32,951 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemStateWithMergeRegion on region ebd17761b06365d94a6efc9193e92c1d 2024-12-02T15:23:32,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] master.HMaster(4169): Remote procedure done, pid=138 2024-12-02T15:23:32,951 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemStateWithMergeRegion on region 1829701d319630807ac0982962b00b1b 2024-12-02T15:23:32,951 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=139, ppid=137, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ebd17761b06365d94a6efc9193e92c1d 2024-12-02T15:23:32,951 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=138, ppid=137, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 1829701d319630807ac0982962b00b1b 2024-12-02T15:23:32,953 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=139, ppid=137, state=SUCCESS, hasLock=false; SnapshotRegionProcedure ebd17761b06365d94a6efc9193e92c1d in 167 msec 2024-12-02T15:23:32,954 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=138, resume processing ppid=137 2024-12-02T15:23:32,954 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=138, ppid=137, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 1829701d319630807ac0982962b00b1b in 167 msec 2024-12-02T15:23:32,954 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=137, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=137, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-02T15:23:32,954 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=137, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=137, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-02T15:23:32,955 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=137, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=137, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute 
state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-02T15:23:32,955 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportFileSystemStateWithMergeRegion 2024-12-02T15:23:32,955 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemStateWithMergeRegion 2024-12-02T15:23:32,963 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742153_1329 (size=597) 2024-12-02T15:23:32,963 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742153_1329 (size=597) 2024-12-02T15:23:32,963 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742153_1329 (size=597) 2024-12-02T15:23:33,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=137 2024-12-02T15:23:33,093 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithMergeRegion 2024-12-02T15:23:33,093 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithMergeRegion Metrics about Tables on a single HBase RegionServer 2024-12-02T15:23:33,094 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testConsecutiveExports 2024-12-02T15:23:33,366 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=137, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=137, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-02T15:23:33,371 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=137, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=137, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-02T15:23:33,371 DEBUG [PEWorker-5 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemStateWithMergeRegion to hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/.hbase-snapshot/emptySnaptb0-testExportFileSystemStateWithMergeRegion 2024-12-02T15:23:33,374 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=137, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=137, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-02T15:23:33,374 DEBUG [PEWorker-5 
{}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 }, snapshot procedure id = 137 2024-12-02T15:23:33,375 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=137, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=137, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } in 599 msec 2024-12-02T15:23:33,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=137 2024-12-02T15:23:33,403 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion completed 2024-12-02T15:23:33,408 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithMergeRegion', row='09542ba2b083b61799136bfa681e1085d', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithMergeRegion,,1733153012127.1829701d319630807ac0982962b00b1b., hostname=be0458f36726,35155,1733152773848, seqNum=2] 2024-12-02T15:23:33,408 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithMergeRegion', row='161ff6f7d0a6efe9f4e1390311ab0e1d8', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithMergeRegion,1,1733153012127.ebd17761b06365d94a6efc9193e92c1d., hostname=be0458f36726,45405,1733152773787, seqNum=2] 2024-12-02T15:23:33,410 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithMergeRegion', row='29d128aa4ae5f26fe60e277e0ad1442c7', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithMergeRegion,1,1733153012127.ebd17761b06365d94a6efc9193e92c1d., hostname=be0458f36726,45405,1733152773787, seqNum=2] 2024-12-02T15:23:33,411 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithMergeRegion', row='35fa16889be912331f128cc7cf222d1bb', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithMergeRegion,1,1733153012127.ebd17761b06365d94a6efc9193e92c1d., hostname=be0458f36726,45405,1733152773787, seqNum=2] 2024-12-02T15:23:33,412 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithMergeRegion', row='468f619529941e75f87a9af6c67f92ff2', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithMergeRegion,1,1733153012127.ebd17761b06365d94a6efc9193e92c1d., hostname=be0458f36726,45405,1733152773787, seqNum=2] 2024-12-02T15:23:33,413 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithMergeRegion', row='54ea4ce58ec0a1e4682c0d1d698b49773', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithMergeRegion,1,1733153012127.ebd17761b06365d94a6efc9193e92c1d., hostname=be0458f36726,45405,1733152773787, seqNum=2] 2024-12-02T15:23:33,414 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35155 {}] regionserver.HRegion(8528): writing data to region 
testtb-testExportFileSystemStateWithMergeRegion,,1733153012127.1829701d319630807ac0982962b00b1b. with WAL disabled. Data may be lost in the event of a crash. 2024-12-02T15:23:33,416 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45405 {}] regionserver.HRegion(8528): writing data to region testtb-testExportFileSystemStateWithMergeRegion,1,1733153012127.ebd17761b06365d94a6efc9193e92c1d. with WAL disabled. Data may be lost in the event of a crash. 2024-12-02T15:23:33,418 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithMergeRegion,, stopping at row=testtb-testExportFileSystemStateWithMergeRegion ,, for max=2147483647 with caching=100 2024-12-02T15:23:33,421 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportFileSystemStateWithMergeRegion 2024-12-02T15:23:33,421 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportFileSystemStateWithMergeRegion,,1733153012127.1829701d319630807ac0982962b00b1b. 2024-12-02T15:23:33,421 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-02T15:23:33,423 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithMergeRegion,, stopping at row=testtb-testExportFileSystemStateWithMergeRegion ,, for max=2147483647 with caching=100 2024-12-02T15:23:33,429 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithMergeRegion,, stopping at row=testtb-testExportFileSystemStateWithMergeRegion ,, for max=2147483647 with caching=100 2024-12-02T15:23:33,436 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithMergeRegion,, stopping at row=testtb-testExportFileSystemStateWithMergeRegion ,, for max=2147483647 with caching=100 2024-12-02T15:23:33,440 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } 2024-12-02T15:23:33,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733153013440 (current time:1733153013440). 
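
The snapshot request logged just above ({ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 }) would typically be issued from the client side through the HBase Admin API. The following is only a minimal illustrative sketch under that assumption (class name and configuration are invented for the example; they are not part of this test):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.SnapshotDescription;
    import org.apache.hadoop.hbase.client.SnapshotType;

    public class SnapshotSketch {  // hypothetical helper, for illustration only
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableName table = TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion");
          // FLUSH-type snapshot: region memstores are flushed before the per-region
          // manifests are written, which is what the SNAPSHOT_SNAPSHOT_ONLINE_REGIONS
          // entries later in this log correspond to.
          admin.snapshot(new SnapshotDescription(
              "snaptb0-testExportFileSystemStateWithMergeRegion", table, SnapshotType.FLUSH));
        }
      }
    }
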
2024-12-02T15:23:33,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-02T15:23:33,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportFileSystemStateWithMergeRegion VERSION not specified, setting to 2 2024-12-02T15:23:33,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-02T15:23:33,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@f9bbe74, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T15:23:33,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] client.ClusterIdFetcher(90): Going to request be0458f36726,34441,-1 for getting cluster id 2024-12-02T15:23:33,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-02T15:23:33,442 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '8e50677f-f1c7-447a-8806-ffbc6b162076' 2024-12-02T15:23:33,442 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-02T15:23:33,442 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "8e50677f-f1c7-447a-8806-ffbc6b162076" 2024-12-02T15:23:33,442 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5bdaba6b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T15:23:33,442 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [be0458f36726,34441,-1] 2024-12-02T15:23:33,442 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-02T15:23:33,443 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T15:23:33,444 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34232, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-02T15:23:33,444 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6faa5a7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T15:23:33,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-02T15:23:33,446 DEBUG 
[MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=be0458f36726,45405,1733152773787, seqNum=-1] 2024-12-02T15:23:33,446 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T15:23:33,447 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51550, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T15:23:33,449 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441. 2024-12-02T15:23:33,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-02T15:23:33,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T15:23:33,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T15:23:33,449 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-12-02T15:23:33,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@57d9d708, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T15:23:33,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] client.ClusterIdFetcher(90): Going to request be0458f36726,34441,-1 for getting cluster id 2024-12-02T15:23:33,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-02T15:23:33,451 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '8e50677f-f1c7-447a-8806-ffbc6b162076' 2024-12-02T15:23:33,451 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-02T15:23:33,451 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "8e50677f-f1c7-447a-8806-ffbc6b162076" 2024-12-02T15:23:33,451 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@62f3e1b9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T15:23:33,451 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [be0458f36726,34441,-1] 2024-12-02T15:23:33,451 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-02T15:23:33,452 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T15:23:33,452 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34248, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-02T15:23:33,453 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1fc47cc8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T15:23:33,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-02T15:23:33,454 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=be0458f36726,45405,1733152773787, seqNum=-1] 2024-12-02T15:23:33,455 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T15:23:33,456 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51558, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
2024-12-02T15:23:33,457 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemStateWithMergeRegion', locateType=CURRENT is [region=hbase:acl,,1733152776657.82c95700d9ff2efea2507bd84223c4b0., hostname=be0458f36726,43025,1733152773677, seqNum=2] 2024-12-02T15:23:33,457 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T15:23:33,458 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34206, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T15:23:33,460 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441. 2024-12-02T15:23:33,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor255.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-02T15:23:33,460 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T15:23:33,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T15:23:33,460 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-02T15:23:33,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithMergeRegion], kv [jenkins: RWXCA] 2024-12-02T15:23:33,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-12-02T15:23:33,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] procedure2.ProcedureExecutor(1139): Stored pid=140, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=140, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } 2024-12-02T15:23:33,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 }, snapshot procedure id = 140 2024-12-02T15:23:33,462 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=140, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=140, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-02T15:23:33,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=140 2024-12-02T15:23:33,463 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=140, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=140, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-02T15:23:33,466 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=140, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=140, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-02T15:23:33,474 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742154_1330 (size=210) 2024-12-02T15:23:33,474 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742154_1330 (size=210) 2024-12-02T15:23:33,474 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742154_1330 (size=210) 2024-12-02T15:23:33,476 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=140, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=140, snapshot={ 
ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-02T15:23:33,476 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=141, ppid=140, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 1829701d319630807ac0982962b00b1b}, {pid=142, ppid=140, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ebd17761b06365d94a6efc9193e92c1d}] 2024-12-02T15:23:33,477 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=141, ppid=140, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 1829701d319630807ac0982962b00b1b 2024-12-02T15:23:33,477 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=142, ppid=140, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ebd17761b06365d94a6efc9193e92c1d 2024-12-02T15:23:33,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=140 2024-12-02T15:23:33,629 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35155 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=141 2024-12-02T15:23:33,629 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45405 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=142 2024-12-02T15:23:33,629 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,,1733153012127.1829701d319630807ac0982962b00b1b. 2024-12-02T15:23:33,629 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,1,1733153012127.ebd17761b06365d94a6efc9193e92c1d. 
2024-12-02T15:23:33,630 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.HRegion(2902): Flushing 1829701d319630807ac0982962b00b1b 1/1 column families, dataSize=65 B heapSize=400 B 2024-12-02T15:23:33,630 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.HRegion(2902): Flushing ebd17761b06365d94a6efc9193e92c1d 1/1 column families, dataSize=3.19 KB heapSize=7.14 KB 2024-12-02T15:23:33,647 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportFileSystemStateWithMergeRegion/ebd17761b06365d94a6efc9193e92c1d/.tmp/cf/85246feab429427393af50359f012eaa is 71, key is 15ea709b0fce0db653c8098d0d45a928/cf:q/1733153013416/Put/seqid=0 2024-12-02T15:23:33,647 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportFileSystemStateWithMergeRegion/1829701d319630807ac0982962b00b1b/.tmp/cf/b92af34f3223495396fc1d9e92e4b89a is 69, key is 09542ba2b083b61799136bfa681e1085d/cf:q/1733153013414/Put/seqid=0 2024-12-02T15:23:33,653 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742155_1331 (size=8460) 2024-12-02T15:23:33,653 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742155_1331 (size=8460) 2024-12-02T15:23:33,653 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742155_1331 (size=8460) 2024-12-02T15:23:33,654 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742156_1332 (size=5149) 2024-12-02T15:23:33,654 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742156_1332 (size=5149) 2024-12-02T15:23:33,654 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742156_1332 (size=5149) 2024-12-02T15:23:33,654 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.19 KB at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportFileSystemStateWithMergeRegion/ebd17761b06365d94a6efc9193e92c1d/.tmp/cf/85246feab429427393af50359f012eaa 2024-12-02T15:23:33,654 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportFileSystemStateWithMergeRegion/1829701d319630807ac0982962b00b1b/.tmp/cf/b92af34f3223495396fc1d9e92e4b89a 2024-12-02T15:23:33,659 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportFileSystemStateWithMergeRegion/1829701d319630807ac0982962b00b1b/.tmp/cf/b92af34f3223495396fc1d9e92e4b89a as hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportFileSystemStateWithMergeRegion/1829701d319630807ac0982962b00b1b/cf/b92af34f3223495396fc1d9e92e4b89a 2024-12-02T15:23:33,659 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportFileSystemStateWithMergeRegion/ebd17761b06365d94a6efc9193e92c1d/.tmp/cf/85246feab429427393af50359f012eaa as hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportFileSystemStateWithMergeRegion/ebd17761b06365d94a6efc9193e92c1d/cf/85246feab429427393af50359f012eaa 2024-12-02T15:23:33,662 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportFileSystemStateWithMergeRegion/1829701d319630807ac0982962b00b1b/cf/b92af34f3223495396fc1d9e92e4b89a, entries=1, sequenceid=6, filesize=5.0 K 2024-12-02T15:23:33,662 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportFileSystemStateWithMergeRegion/ebd17761b06365d94a6efc9193e92c1d/cf/85246feab429427393af50359f012eaa, entries=49, sequenceid=6, filesize=8.3 K 2024-12-02T15:23:33,663 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.HRegion(3140): Finished flush of dataSize ~65 B/65, heapSize ~384 B/384, currentSize=0 B/0 for 1829701d319630807ac0982962b00b1b in 34ms, sequenceid=6, compaction requested=false 2024-12-02T15:23:33,663 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.HRegion(3140): Finished flush of dataSize ~3.19 KB/3271, heapSize ~7.13 KB/7296, currentSize=0 B/0 for ebd17761b06365d94a6efc9193e92c1d in 34ms, sequenceid=6, compaction requested=false 2024-12-02T15:23:33,663 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportFileSystemStateWithMergeRegion' 2024-12-02T15:23:33,663 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportFileSystemStateWithMergeRegion' 2024-12-02T15:23:33,664 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.HRegion(2603): Flush status journal for 1829701d319630807ac0982962b00b1b: 2024-12-02T15:23:33,664 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.HRegion(2603): Flush status journal for ebd17761b06365d94a6efc9193e92c1d: 
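
The flushes above are triggered internally by the FLUSH snapshot procedure itself; an equivalent flush can also be requested explicitly. A minimal sketch, assuming an already open Connection named conn (the variable name is an assumption for the example, not taken from the test code):

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;

    // Write out the memstores of every region of the table as HFiles, the same
    // per-region flush SnapshotRegionCallable performs before adding store-file
    // references to the snapshot manifest.
    try (Admin admin = conn.getAdmin()) {
      admin.flush(TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion"));
    }
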
2024-12-02T15:23:33,664 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithMergeRegion,,1733153012127.1829701d319630807ac0982962b00b1b. for snaptb0-testExportFileSystemStateWithMergeRegion completed. 2024-12-02T15:23:33,664 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithMergeRegion,1,1733153012127.ebd17761b06365d94a6efc9193e92c1d. for snaptb0-testExportFileSystemStateWithMergeRegion completed. 2024-12-02T15:23:33,664 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithMergeRegion,,1733153012127.1829701d319630807ac0982962b00b1b.' region-info for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion 2024-12-02T15:23:33,664 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithMergeRegion,1,1733153012127.ebd17761b06365d94a6efc9193e92c1d.' region-info for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion 2024-12-02T15:23:33,664 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-02T15:23:33,664 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-02T15:23:33,664 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportFileSystemStateWithMergeRegion/1829701d319630807ac0982962b00b1b/cf/b92af34f3223495396fc1d9e92e4b89a] hfiles 2024-12-02T15:23:33,664 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportFileSystemStateWithMergeRegion/ebd17761b06365d94a6efc9193e92c1d/cf/85246feab429427393af50359f012eaa] hfiles 2024-12-02T15:23:33,664 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportFileSystemStateWithMergeRegion/1829701d319630807ac0982962b00b1b/cf/b92af34f3223495396fc1d9e92e4b89a for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion 2024-12-02T15:23:33,664 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportFileSystemStateWithMergeRegion/ebd17761b06365d94a6efc9193e92c1d/cf/85246feab429427393af50359f012eaa for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion 2024-12-02T15:23:33,669 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742157_1333 (size=125) 2024-12-02T15:23:33,670 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742157_1333 (size=125) 2024-12-02T15:23:33,670 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742157_1333 (size=125) 2024-12-02T15:23:33,670 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742158_1334 (size=125) 2024-12-02T15:23:33,670 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,,1733153012127.1829701d319630807ac0982962b00b1b. 2024-12-02T15:23:33,670 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=141 2024-12-02T15:23:33,671 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742158_1334 (size=125) 2024-12-02T15:23:33,671 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742158_1334 (size=125) 2024-12-02T15:23:33,671 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,1,1733153012127.ebd17761b06365d94a6efc9193e92c1d. 
2024-12-02T15:23:33,671 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=142 2024-12-02T15:23:33,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] master.HMaster(4169): Remote procedure done, pid=141 2024-12-02T15:23:33,671 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemStateWithMergeRegion on region 1829701d319630807ac0982962b00b1b 2024-12-02T15:23:33,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.HMaster(4169): Remote procedure done, pid=142 2024-12-02T15:23:33,671 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=141, ppid=140, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 1829701d319630807ac0982962b00b1b 2024-12-02T15:23:33,671 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemStateWithMergeRegion on region ebd17761b06365d94a6efc9193e92c1d 2024-12-02T15:23:33,671 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=142, ppid=140, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ebd17761b06365d94a6efc9193e92c1d 2024-12-02T15:23:33,673 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=141, ppid=140, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 1829701d319630807ac0982962b00b1b in 196 msec 2024-12-02T15:23:33,673 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=142, resume processing ppid=140 2024-12-02T15:23:33,674 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=142, ppid=140, state=SUCCESS, hasLock=false; SnapshotRegionProcedure ebd17761b06365d94a6efc9193e92c1d in 196 msec 2024-12-02T15:23:33,674 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=140, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=140, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-02T15:23:33,674 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=140, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=140, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-02T15:23:33,675 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=140, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=140, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-02T15:23:33,675 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportFileSystemStateWithMergeRegion 2024-12-02T15:23:33,675 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifestV1(130): No regions under 
directory:hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithMergeRegion 2024-12-02T15:23:33,683 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742159_1335 (size=675) 2024-12-02T15:23:33,683 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742159_1335 (size=675) 2024-12-02T15:23:33,684 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742159_1335 (size=675) 2024-12-02T15:23:33,688 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=140, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=140, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-02T15:23:33,693 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=140, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=140, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-02T15:23:33,694 DEBUG [PEWorker-3 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithMergeRegion to hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion 2024-12-02T15:23:33,695 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=140, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=140, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-02T15:23:33,695 DEBUG [PEWorker-3 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 }, snapshot procedure id = 140 2024-12-02T15:23:33,697 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=140, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=140, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } in 234 msec 2024-12-02T15:23:33,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=140 2024-12-02T15:23:33,783 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion completed 2024-12-02T15:23:33,786 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51564, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-02T15:23:33,786 INFO 
[MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34216, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-02T15:23:33,786 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45206, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-02T15:23:33,787 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testExportFileSystemStateWithMergeRegion-1', {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-02T15:23:33,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] procedure2.ProcedureExecutor(1139): Stored pid=143, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-02T15:23:33,789 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=143, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 execute state=CREATE_TABLE_PRE_OPERATION 2024-12-02T15:23:33,789 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T15:23:33,789 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportFileSystemStateWithMergeRegion-1" procId is: 143 2024-12-02T15:23:33,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=143 2024-12-02T15:23:33,789 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=143, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-02T15:23:33,795 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742160_1336 (size=399) 2024-12-02T15:23:33,795 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742160_1336 (size=399) 2024-12-02T15:23:33,795 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742160_1336 (size=399) 2024-12-02T15:23:33,797 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => 046bda82651220493d064993627d2cdf, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,2,1733153013787.046bda82651220493d064993627d2cdf.', STARTKEY => '2', ENDKEY => ''}, tableDescriptor='testtb-testExportFileSystemStateWithMergeRegion-1', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657 2024-12-02T15:23:33,797 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 55b8cdac8353563ac0ba4bf6d1405f16, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,,1733153013787.55b8cdac8353563ac0ba4bf6d1405f16.', STARTKEY => '', ENDKEY => '2'}, tableDescriptor='testtb-testExportFileSystemStateWithMergeRegion-1', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657 2024-12-02T15:23:33,802 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742161_1337 (size=85) 2024-12-02T15:23:33,802 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742162_1338 (size=85) 2024-12-02T15:23:33,803 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742161_1337 (size=85) 2024-12-02T15:23:33,803 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742161_1337 (size=85) 2024-12-02T15:23:33,803 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742162_1338 (size=85) 2024-12-02T15:23:33,803 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742162_1338 (size=85) 2024-12-02T15:23:33,803 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion-1,2,1733153013787.046bda82651220493d064993627d2cdf.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T15:23:33,804 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1722): Closing 046bda82651220493d064993627d2cdf, disabling compactions & flushes 2024-12-02T15:23:33,804 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion-1,2,1733153013787.046bda82651220493d064993627d2cdf. 2024-12-02T15:23:33,804 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion-1,2,1733153013787.046bda82651220493d064993627d2cdf. 
2024-12-02T15:23:33,804 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion-1,2,1733153013787.046bda82651220493d064993627d2cdf. after waiting 0 ms 2024-12-02T15:23:33,804 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion-1,2,1733153013787.046bda82651220493d064993627d2cdf. 2024-12-02T15:23:33,804 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion-1,2,1733153013787.046bda82651220493d064993627d2cdf. 2024-12-02T15:23:33,804 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1676): Region close journal for 046bda82651220493d064993627d2cdf: Waiting for close lock at 1733153013804Disabling compacts and flushes for region at 1733153013804Disabling writes for close at 1733153013804Writing region close event to WAL at 1733153013804Closed at 1733153013804 2024-12-02T15:23:33,804 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion-1,,1733153013787.55b8cdac8353563ac0ba4bf6d1405f16.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T15:23:33,804 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1722): Closing 55b8cdac8353563ac0ba4bf6d1405f16, disabling compactions & flushes 2024-12-02T15:23:33,804 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion-1,,1733153013787.55b8cdac8353563ac0ba4bf6d1405f16. 2024-12-02T15:23:33,804 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion-1,,1733153013787.55b8cdac8353563ac0ba4bf6d1405f16. 2024-12-02T15:23:33,804 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion-1,,1733153013787.55b8cdac8353563ac0ba4bf6d1405f16. after waiting 0 ms 2024-12-02T15:23:33,804 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion-1,,1733153013787.55b8cdac8353563ac0ba4bf6d1405f16. 2024-12-02T15:23:33,804 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion-1,,1733153013787.55b8cdac8353563ac0ba4bf6d1405f16. 
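[Editor's note] The entries above show CreateTableProcedure pid=143 laying out 'testtb-testExportFileSystemStateWithMergeRegion-1' with a single 'cf' family (VERSIONS => '1') and two regions split at row key '2'. A minimal client-side sketch of an equivalent create request through the HBase Admin API follows; it is illustrative only (the test's own setup code is not part of this log), and the configuration/connection boilerplate is assumed.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateMergeRegionTable {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();            // picks up hbase-site.xml
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion-1");
      // Single family 'cf' with max versions 1, as printed in the create request above.
      TableDescriptorBuilder td = TableDescriptorBuilder.newBuilder(table)
          .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
              .setMaxVersions(1)
              .build());
      // One split point at '2' yields the two regions seen in the log:
      // [STARTKEY='', ENDKEY='2'] and [STARTKEY='2', ENDKEY=''].
      byte[][] splits = new byte[][] { Bytes.toBytes("2") };
      admin.createTable(td.build(), splits);                      // blocks until the procedure completes
    }
  }
}
```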
2024-12-02T15:23:33,804 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1676): Region close journal for 55b8cdac8353563ac0ba4bf6d1405f16: Waiting for close lock at 1733153013804Disabling compacts and flushes for region at 1733153013804Disabling writes for close at 1733153013804Writing region close event to WAL at 1733153013804Closed at 1733153013804 2024-12-02T15:23:33,805 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=143, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 execute state=CREATE_TABLE_ADD_TO_META 2024-12-02T15:23:33,805 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithMergeRegion-1,2,1733153013787.046bda82651220493d064993627d2cdf.","families":{"info":[{"qualifier":"regioninfo","vlen":84,"tag":[],"timestamp":"1733153013805"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733153013805"}]},"ts":"1733153013805"} 2024-12-02T15:23:33,805 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithMergeRegion-1,,1733153013787.55b8cdac8353563ac0ba4bf6d1405f16.","families":{"info":[{"qualifier":"regioninfo","vlen":84,"tag":[],"timestamp":"1733153013805"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733153013805"}]},"ts":"1733153013805"} 2024-12-02T15:23:33,808 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-12-02T15:23:33,808 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=143, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-02T15:23:33,808 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733153013808"}]},"ts":"1733153013808"} 2024-12-02T15:23:33,810 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion-1, state=ENABLING in hbase:meta 2024-12-02T15:23:33,810 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(204): Hosts are {be0458f36726=0} racks are {/default-rack=0} 2024-12-02T15:23:33,811 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-02T15:23:33,811 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-02T15:23:33,811 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-02T15:23:33,811 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-02T15:23:33,811 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-02T15:23:33,811 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-02T15:23:33,811 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-02T15:23:33,811 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-02T15:23:33,811 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-02T15:23:33,811 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-02T15:23:33,812 INFO [PEWorker-4 {}] 
procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=144, ppid=143, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=55b8cdac8353563ac0ba4bf6d1405f16, ASSIGN}, {pid=145, ppid=143, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=046bda82651220493d064993627d2cdf, ASSIGN}] 2024-12-02T15:23:33,813 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=144, ppid=143, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=55b8cdac8353563ac0ba4bf6d1405f16, ASSIGN 2024-12-02T15:23:33,813 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=145, ppid=143, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=046bda82651220493d064993627d2cdf, ASSIGN 2024-12-02T15:23:33,813 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(269): Starting pid=145, ppid=143, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=046bda82651220493d064993627d2cdf, ASSIGN; state=OFFLINE, location=be0458f36726,45405,1733152773787; forceNewPlan=false, retain=false 2024-12-02T15:23:33,813 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=144, ppid=143, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=55b8cdac8353563ac0ba4bf6d1405f16, ASSIGN; state=OFFLINE, location=be0458f36726,43025,1733152773677; forceNewPlan=false, retain=false 2024-12-02T15:23:33,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=143 2024-12-02T15:23:33,964 INFO [be0458f36726:34441 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
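[Editor's note] The ASSIGN subprocedures above (pid=144/145) pick a hosting region server for each of the two new regions and record the choice in hbase:meta. A hedged sketch of how a client could observe the resulting placement via RegionLocator; only the table name is taken from the log, the surrounding code is assumed.

```java
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.RegionLocator;

public class PrintRegionPlacement {
  // Assumes an already-open Connection, e.g. from ConnectionFactory.createConnection(conf).
  static void printPlacement(Connection conn) throws Exception {
    TableName table = TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion-1");
    try (RegionLocator locator = conn.getRegionLocator(table)) {
      for (HRegionLocation loc : locator.getAllRegionLocations()) {
        // Encoded region name and hosting region server, i.e. the same pairing the
        // ASSIGN procedures above write to hbase:meta as regionLocation=...
        System.out.println(loc.getRegion().getEncodedName() + " -> " + loc.getServerName());
      }
    }
  }
}
```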
2024-12-02T15:23:33,964 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=144 updating hbase:meta row=55b8cdac8353563ac0ba4bf6d1405f16, regionState=OPENING, regionLocation=be0458f36726,43025,1733152773677 2024-12-02T15:23:33,964 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=145 updating hbase:meta row=046bda82651220493d064993627d2cdf, regionState=OPENING, regionLocation=be0458f36726,45405,1733152773787 2024-12-02T15:23:33,966 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=144, ppid=143, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=55b8cdac8353563ac0ba4bf6d1405f16, ASSIGN because future has completed 2024-12-02T15:23:33,966 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=146, ppid=144, state=RUNNABLE, hasLock=false; OpenRegionProcedure 55b8cdac8353563ac0ba4bf6d1405f16, server=be0458f36726,43025,1733152773677}] 2024-12-02T15:23:33,967 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=145, ppid=143, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=046bda82651220493d064993627d2cdf, ASSIGN because future has completed 2024-12-02T15:23:33,967 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=147, ppid=145, state=RUNNABLE, hasLock=false; OpenRegionProcedure 046bda82651220493d064993627d2cdf, server=be0458f36726,45405,1733152773787}] 2024-12-02T15:23:34,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=143 2024-12-02T15:23:34,121 INFO [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithMergeRegion-1,,1733153013787.55b8cdac8353563ac0ba4bf6d1405f16. 2024-12-02T15:23:34,121 INFO [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithMergeRegion-1,2,1733153013787.046bda82651220493d064993627d2cdf. 2024-12-02T15:23:34,121 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(7752): Opening region: {ENCODED => 55b8cdac8353563ac0ba4bf6d1405f16, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,,1733153013787.55b8cdac8353563ac0ba4bf6d1405f16.', STARTKEY => '', ENDKEY => '2'} 2024-12-02T15:23:34,121 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(7752): Opening region: {ENCODED => 046bda82651220493d064993627d2cdf, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,2,1733153013787.046bda82651220493d064993627d2cdf.', STARTKEY => '2', ENDKEY => ''} 2024-12-02T15:23:34,122 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithMergeRegion-1,2,1733153013787.046bda82651220493d064993627d2cdf. 
service=AccessControlService 2024-12-02T15:23:34,122 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithMergeRegion-1,,1733153013787.55b8cdac8353563ac0ba4bf6d1405f16. service=AccessControlService 2024-12-02T15:23:34,122 INFO [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-02T15:23:34,122 INFO [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-02T15:23:34,122 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithMergeRegion-1 046bda82651220493d064993627d2cdf 2024-12-02T15:23:34,122 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithMergeRegion-1 55b8cdac8353563ac0ba4bf6d1405f16 2024-12-02T15:23:34,122 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion-1,2,1733153013787.046bda82651220493d064993627d2cdf.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T15:23:34,122 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion-1,,1733153013787.55b8cdac8353563ac0ba4bf6d1405f16.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T15:23:34,122 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(7794): checking encryption for 046bda82651220493d064993627d2cdf 2024-12-02T15:23:34,122 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(7797): checking classloading for 046bda82651220493d064993627d2cdf 2024-12-02T15:23:34,122 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(7794): checking encryption for 55b8cdac8353563ac0ba4bf6d1405f16 2024-12-02T15:23:34,122 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(7797): checking classloading for 55b8cdac8353563ac0ba4bf6d1405f16 2024-12-02T15:23:34,123 INFO [StoreOpener-55b8cdac8353563ac0ba4bf6d1405f16-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 55b8cdac8353563ac0ba4bf6d1405f16 2024-12-02T15:23:34,123 INFO [StoreOpener-046bda82651220493d064993627d2cdf-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, 
cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 046bda82651220493d064993627d2cdf 2024-12-02T15:23:34,125 INFO [StoreOpener-046bda82651220493d064993627d2cdf-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 046bda82651220493d064993627d2cdf columnFamilyName cf 2024-12-02T15:23:34,125 INFO [StoreOpener-55b8cdac8353563ac0ba4bf6d1405f16-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 55b8cdac8353563ac0ba4bf6d1405f16 columnFamilyName cf 2024-12-02T15:23:34,125 DEBUG [StoreOpener-046bda82651220493d064993627d2cdf-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T15:23:34,125 DEBUG [StoreOpener-55b8cdac8353563ac0ba4bf6d1405f16-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T15:23:34,126 INFO [StoreOpener-046bda82651220493d064993627d2cdf-1 {}] regionserver.HStore(327): Store=046bda82651220493d064993627d2cdf/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T15:23:34,126 INFO [StoreOpener-55b8cdac8353563ac0ba4bf6d1405f16-1 {}] regionserver.HStore(327): Store=55b8cdac8353563ac0ba4bf6d1405f16/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T15:23:34,126 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(1038): replaying wal for 046bda82651220493d064993627d2cdf 2024-12-02T15:23:34,126 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(1038): replaying wal for 55b8cdac8353563ac0ba4bf6d1405f16 2024-12-02T15:23:34,126 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/046bda82651220493d064993627d2cdf 2024-12-02T15:23:34,126 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/55b8cdac8353563ac0ba4bf6d1405f16 2024-12-02T15:23:34,127 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/046bda82651220493d064993627d2cdf 2024-12-02T15:23:34,127 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/55b8cdac8353563ac0ba4bf6d1405f16 2024-12-02T15:23:34,127 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(1048): stopping wal replay for 046bda82651220493d064993627d2cdf 2024-12-02T15:23:34,127 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(1060): Cleaning up temporary data for 046bda82651220493d064993627d2cdf 2024-12-02T15:23:34,127 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(1048): stopping wal replay for 55b8cdac8353563ac0ba4bf6d1405f16 2024-12-02T15:23:34,127 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(1060): Cleaning up temporary data for 55b8cdac8353563ac0ba4bf6d1405f16 2024-12-02T15:23:34,128 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(1093): writing seq id for 046bda82651220493d064993627d2cdf 2024-12-02T15:23:34,128 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(1093): writing seq id for 55b8cdac8353563ac0ba4bf6d1405f16 2024-12-02T15:23:34,132 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/55b8cdac8353563ac0ba4bf6d1405f16/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-02T15:23:34,132 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/046bda82651220493d064993627d2cdf/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-02T15:23:34,132 INFO [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(1114): Opened 55b8cdac8353563ac0ba4bf6d1405f16; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, 
ConstantSizeRegionSplitPolicy{desiredMaxFileSize=74260756, jitterRate=0.1065714955329895}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-02T15:23:34,132 INFO [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(1114): Opened 046bda82651220493d064993627d2cdf; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=64920396, jitterRate=-0.03261071443557739}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-02T15:23:34,132 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 55b8cdac8353563ac0ba4bf6d1405f16 2024-12-02T15:23:34,132 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 046bda82651220493d064993627d2cdf 2024-12-02T15:23:34,133 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(1006): Region open journal for 55b8cdac8353563ac0ba4bf6d1405f16: Running coprocessor pre-open hook at 1733153014122Writing region info on filesystem at 1733153014122Initializing all the Stores at 1733153014123 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733153014123Cleaning up temporary data from old regions at 1733153014127 (+4 ms)Running coprocessor post-open hooks at 1733153014132 (+5 ms)Region opened successfully at 1733153014133 (+1 ms) 2024-12-02T15:23:34,133 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(1006): Region open journal for 046bda82651220493d064993627d2cdf: Running coprocessor pre-open hook at 1733153014122Writing region info on filesystem at 1733153014122Initializing all the Stores at 1733153014123 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733153014123Cleaning up temporary data from old regions at 1733153014127 (+4 ms)Running coprocessor post-open hooks at 1733153014132 (+5 ms)Region opened successfully at 1733153014133 (+1 ms) 2024-12-02T15:23:34,134 INFO [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemStateWithMergeRegion-1,2,1733153013787.046bda82651220493d064993627d2cdf., pid=147, masterSystemTime=1733153014119 2024-12-02T15:23:34,134 INFO [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemStateWithMergeRegion-1,,1733153013787.55b8cdac8353563ac0ba4bf6d1405f16., pid=146, masterSystemTime=1733153014118 2024-12-02T15:23:34,135 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] 
regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithMergeRegion-1,,1733153013787.55b8cdac8353563ac0ba4bf6d1405f16. 2024-12-02T15:23:34,136 INFO [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithMergeRegion-1,,1733153013787.55b8cdac8353563ac0ba4bf6d1405f16. 2024-12-02T15:23:34,136 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=144 updating hbase:meta row=55b8cdac8353563ac0ba4bf6d1405f16, regionState=OPEN, openSeqNum=2, regionLocation=be0458f36726,43025,1733152773677 2024-12-02T15:23:34,136 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithMergeRegion-1,2,1733153013787.046bda82651220493d064993627d2cdf. 2024-12-02T15:23:34,136 INFO [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithMergeRegion-1,2,1733153013787.046bda82651220493d064993627d2cdf. 2024-12-02T15:23:34,137 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=145 updating hbase:meta row=046bda82651220493d064993627d2cdf, regionState=OPEN, openSeqNum=2, regionLocation=be0458f36726,45405,1733152773787 2024-12-02T15:23:34,138 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=146, ppid=144, state=RUNNABLE, hasLock=false; OpenRegionProcedure 55b8cdac8353563ac0ba4bf6d1405f16, server=be0458f36726,43025,1733152773677 because future has completed 2024-12-02T15:23:34,139 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=147, ppid=145, state=RUNNABLE, hasLock=false; OpenRegionProcedure 046bda82651220493d064993627d2cdf, server=be0458f36726,45405,1733152773787 because future has completed 2024-12-02T15:23:34,141 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=146, resume processing ppid=144 2024-12-02T15:23:34,141 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=146, ppid=144, state=SUCCESS, hasLock=false; OpenRegionProcedure 55b8cdac8353563ac0ba4bf6d1405f16, server=be0458f36726,43025,1733152773677 in 173 msec 2024-12-02T15:23:34,141 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=147, resume processing ppid=145 2024-12-02T15:23:34,142 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=147, ppid=145, state=SUCCESS, hasLock=false; OpenRegionProcedure 046bda82651220493d064993627d2cdf, server=be0458f36726,45405,1733152773787 in 173 msec 2024-12-02T15:23:34,142 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=144, ppid=143, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=55b8cdac8353563ac0ba4bf6d1405f16, ASSIGN in 330 msec 2024-12-02T15:23:34,143 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=145, resume processing ppid=143 2024-12-02T15:23:34,143 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=145, ppid=143, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=046bda82651220493d064993627d2cdf, ASSIGN in 331 msec 2024-12-02T15:23:34,144 
INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=143, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-02T15:23:34,144 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733153014144"}]},"ts":"1733153014144"} 2024-12-02T15:23:34,146 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion-1, state=ENABLED in hbase:meta 2024-12-02T15:23:34,147 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=143, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 execute state=CREATE_TABLE_POST_OPERATION 2024-12-02T15:23:34,148 DEBUG [PEWorker-4 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportFileSystemStateWithMergeRegion-1 jenkins: RWXCA 2024-12-02T15:23:34,151 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43025 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithMergeRegion-1], kv [jenkins: RWXCA] 2024-12-02T15:23:34,190 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34441-0x10197ce54a90000, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T15:23:34,190 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43025-0x10197ce54a90001, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T15:23:34,190 DEBUG [pool-69-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45405-0x10197ce54a90002, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T15:23:34,190 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35155-0x10197ce54a90003, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T15:23:34,200 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-02T15:23:34,200 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-02T15:23:34,200 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF\x0AW\x0A\x07jenkins\x12L\x08\x03"H\x0A<\x0A\x07default\x121testtb-testExportFileSystemStateWithMergeRegion-1 \x00 \x01 \x02 \x03 \x04 2024-12-02T15:23:34,200 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data 
PBUF\x0AW\x0A\x07jenkins\x12L\x08\x03"H\x0A<\x0A\x07default\x121testtb-testExportFileSystemStateWithMergeRegion-1 \x00 \x01 \x02 \x03 \x04 2024-12-02T15:23:34,200 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-02T15:23:34,200 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF\x0AW\x0A\x07jenkins\x12L\x08\x03"H\x0A<\x0A\x07default\x121testtb-testExportFileSystemStateWithMergeRegion-1 \x00 \x01 \x02 \x03 \x04 2024-12-02T15:23:34,201 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-02T15:23:34,201 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF\x0AW\x0A\x07jenkins\x12L\x08\x03"H\x0A<\x0A\x07default\x121testtb-testExportFileSystemStateWithMergeRegion-1 \x00 \x01 \x02 \x03 \x04 2024-12-02T15:23:34,202 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=143, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 in 412 msec 2024-12-02T15:23:34,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=143 2024-12-02T15:23:34,413 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion-1 completed 2024-12-02T15:23:34,417 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithMergeRegion-1', row='1', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithMergeRegion-1,,1733153013787.55b8cdac8353563ac0ba4bf6d1405f16., hostname=be0458f36726,43025,1733152773677, seqNum=2] 2024-12-02T15:23:34,422 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithMergeRegion-1', row='2', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithMergeRegion-1,2,1733153013787.046bda82651220493d064993627d2cdf., hostname=be0458f36726,45405,1733152773787, seqNum=2] 2024-12-02T15:23:34,424 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithMergeRegion-1,, stopping at row=testtb-testExportFileSystemStateWithMergeRegion-1 ,, for max=2147483647 with caching=100 2024-12-02T15:23:34,438 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.HMaster$2(2278): Client=jenkins//172.17.0.2 merge regions [55b8cdac8353563ac0ba4bf6d1405f16, 046bda82651220493d064993627d2cdf] 2024-12-02T15:23:34,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] procedure2.ProcedureExecutor(1139): Stored pid=148, state=RUNNABLE:MERGE_TABLE_REGIONS_PREPARE, hasLock=false; MergeTableRegionsProcedure 
table=testtb-testExportFileSystemStateWithMergeRegion-1, regions=[55b8cdac8353563ac0ba4bf6d1405f16, 046bda82651220493d064993627d2cdf], force=true 2024-12-02T15:23:34,445 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=148, state=RUNNABLE:MERGE_TABLE_REGIONS_PREPARE, hasLock=false; MergeTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, regions=[55b8cdac8353563ac0ba4bf6d1405f16, 046bda82651220493d064993627d2cdf], force=true 2024-12-02T15:23:34,445 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=148, state=RUNNABLE:MERGE_TABLE_REGIONS_PREPARE, hasLock=false; MergeTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, regions=[55b8cdac8353563ac0ba4bf6d1405f16, 046bda82651220493d064993627d2cdf], force=true 2024-12-02T15:23:34,445 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=148, state=RUNNABLE:MERGE_TABLE_REGIONS_PREPARE, hasLock=false; MergeTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, regions=[55b8cdac8353563ac0ba4bf6d1405f16, 046bda82651220493d064993627d2cdf], force=true 2024-12-02T15:23:34,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=148 2024-12-02T15:23:34,452 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=149, ppid=148, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=55b8cdac8353563ac0ba4bf6d1405f16, UNASSIGN}, {pid=150, ppid=148, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=046bda82651220493d064993627d2cdf, UNASSIGN}] 2024-12-02T15:23:34,453 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=150, ppid=148, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=046bda82651220493d064993627d2cdf, UNASSIGN 2024-12-02T15:23:34,453 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=149, ppid=148, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=55b8cdac8353563ac0ba4bf6d1405f16, UNASSIGN 2024-12-02T15:23:34,454 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=150 updating hbase:meta row=046bda82651220493d064993627d2cdf, regionState=CLOSING, regionLocation=be0458f36726,45405,1733152773787 2024-12-02T15:23:34,454 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=149 updating hbase:meta row=55b8cdac8353563ac0ba4bf6d1405f16, regionState=CLOSING, regionLocation=be0458f36726,43025,1733152773677 2024-12-02T15:23:34,455 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=149, ppid=148, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=55b8cdac8353563ac0ba4bf6d1405f16, UNASSIGN because future has completed 2024-12-02T15:23:34,455 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: true: evictOnSplit: true: evictOnClose: false 2024-12-02T15:23:34,455 INFO [PEWorker-5 {}] 
procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=151, ppid=149, state=RUNNABLE, hasLock=false; CloseRegionProcedure 55b8cdac8353563ac0ba4bf6d1405f16, server=be0458f36726,43025,1733152773677}] 2024-12-02T15:23:34,456 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=150, ppid=148, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=046bda82651220493d064993627d2cdf, UNASSIGN because future has completed 2024-12-02T15:23:34,456 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: true: evictOnSplit: true: evictOnClose: false 2024-12-02T15:23:34,456 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=152, ppid=150, state=RUNNABLE, hasLock=false; CloseRegionProcedure 046bda82651220493d064993627d2cdf, server=be0458f36726,45405,1733152773787}] 2024-12-02T15:23:34,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=148 2024-12-02T15:23:34,608 INFO [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] handler.UnassignRegionHandler(122): Close 55b8cdac8353563ac0ba4bf6d1405f16 2024-12-02T15:23:34,608 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] handler.UnassignRegionHandler(136): Unassign region: split region: true: evictCache: true 2024-12-02T15:23:34,608 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(1722): Closing 55b8cdac8353563ac0ba4bf6d1405f16, disabling compactions & flushes 2024-12-02T15:23:34,608 INFO [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion-1,,1733153013787.55b8cdac8353563ac0ba4bf6d1405f16. 2024-12-02T15:23:34,608 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion-1,,1733153013787.55b8cdac8353563ac0ba4bf6d1405f16. 2024-12-02T15:23:34,609 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion-1,,1733153013787.55b8cdac8353563ac0ba4bf6d1405f16. after waiting 0 ms 2024-12-02T15:23:34,609 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion-1,,1733153013787.55b8cdac8353563ac0ba4bf6d1405f16. 
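[Editor's note] pid=148 above is a MergeTableRegionsProcedure with force=true over regions 55b8cdac8353563ac0ba4bf6d1405f16 and 046bda82651220493d064993627d2cdf; the CloseRegionProcedures it just spawned (pid=151/152) unassign the two parents first. A rough sketch of the client-side call that triggers such a merge, assuming an open Admin handle (the test's actual invocation is not shown in this log):

```java
import java.util.List;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.RegionInfo;

public class MergeAdjacentRegions {
  // Assumes an already-open Admin handle, as in the earlier sketches.
  static void mergeBothRegions(Admin admin) throws Exception {
    TableName table = TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion-1");
    List<RegionInfo> regions = admin.getRegions(table);   // the two regions split at '2'
    byte[][] toMerge = new byte[][] {
        regions.get(0).getEncodedNameAsBytes(),
        regions.get(1).getEncodedNameAsBytes()
    };
    // forcible=true mirrors the force=true flag on MergeTableRegionsProcedure pid=148.
    admin.mergeRegionsAsync(toMerge, true).get();          // block until the master procedure finishes
  }
}
```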
2024-12-02T15:23:34,609 INFO [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(2902): Flushing 55b8cdac8353563ac0ba4bf6d1405f16 1/1 column families, dataSize=24 B heapSize=352 B 2024-12-02T15:23:34,609 INFO [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] handler.UnassignRegionHandler(122): Close 046bda82651220493d064993627d2cdf 2024-12-02T15:23:34,609 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] handler.UnassignRegionHandler(136): Unassign region: split region: true: evictCache: true 2024-12-02T15:23:34,609 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] regionserver.HRegion(1722): Closing 046bda82651220493d064993627d2cdf, disabling compactions & flushes 2024-12-02T15:23:34,609 INFO [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion-1,2,1733153013787.046bda82651220493d064993627d2cdf. 2024-12-02T15:23:34,609 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion-1,2,1733153013787.046bda82651220493d064993627d2cdf. 2024-12-02T15:23:34,609 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion-1,2,1733153013787.046bda82651220493d064993627d2cdf. after waiting 0 ms 2024-12-02T15:23:34,609 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion-1,2,1733153013787.046bda82651220493d064993627d2cdf. 
2024-12-02T15:23:34,609 INFO [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] regionserver.HRegion(2902): Flushing 046bda82651220493d064993627d2cdf 1/1 column families, dataSize=24 B heapSize=352 B 2024-12-02T15:23:34,626 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/046bda82651220493d064993627d2cdf/.tmp/cf/13879a4e44344a7382fc2c403ac431be is 28, key is 2/cf:/1733153014423/Put/seqid=0 2024-12-02T15:23:34,629 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/55b8cdac8353563ac0ba4bf6d1405f16/.tmp/cf/27ed86fe7e6f42f2b6c6510e3f918f28 is 28, key is 1/cf:/1733153014418/Put/seqid=0 2024-12-02T15:23:34,632 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742163_1339 (size=4945) 2024-12-02T15:23:34,632 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742163_1339 (size=4945) 2024-12-02T15:23:34,632 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742163_1339 (size=4945) 2024-12-02T15:23:34,649 INFO [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24 B at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/046bda82651220493d064993627d2cdf/.tmp/cf/13879a4e44344a7382fc2c403ac431be 2024-12-02T15:23:34,653 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742164_1340 (size=4945) 2024-12-02T15:23:34,653 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742164_1340 (size=4945) 2024-12-02T15:23:34,654 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742164_1340 (size=4945) 2024-12-02T15:23:34,655 INFO [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24 B at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/55b8cdac8353563ac0ba4bf6d1405f16/.tmp/cf/27ed86fe7e6f42f2b6c6510e3f918f28 2024-12-02T15:23:34,655 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/046bda82651220493d064993627d2cdf/.tmp/cf/13879a4e44344a7382fc2c403ac431be as 
hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/046bda82651220493d064993627d2cdf/cf/13879a4e44344a7382fc2c403ac431be 2024-12-02T15:23:34,659 INFO [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/046bda82651220493d064993627d2cdf/cf/13879a4e44344a7382fc2c403ac431be, entries=1, sequenceid=5, filesize=4.8 K 2024-12-02T15:23:34,659 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/55b8cdac8353563ac0ba4bf6d1405f16/.tmp/cf/27ed86fe7e6f42f2b6c6510e3f918f28 as hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/55b8cdac8353563ac0ba4bf6d1405f16/cf/27ed86fe7e6f42f2b6c6510e3f918f28 2024-12-02T15:23:34,660 INFO [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] regionserver.HRegion(3140): Finished flush of dataSize ~24 B/24, heapSize ~336 B/336, currentSize=0 B/0 for 046bda82651220493d064993627d2cdf in 51ms, sequenceid=5, compaction requested=false 2024-12-02T15:23:34,660 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportFileSystemStateWithMergeRegion-1' 2024-12-02T15:23:34,664 INFO [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/55b8cdac8353563ac0ba4bf6d1405f16/cf/27ed86fe7e6f42f2b6c6510e3f918f28, entries=1, sequenceid=5, filesize=4.8 K 2024-12-02T15:23:34,665 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/046bda82651220493d064993627d2cdf/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-12-02T15:23:34,666 INFO [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(3140): Finished flush of dataSize ~24 B/24, heapSize ~336 B/336, currentSize=0 B/0 for 55b8cdac8353563ac0ba4bf6d1405f16 in 56ms, sequenceid=5, compaction requested=false 2024-12-02T15:23:34,666 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-02T15:23:34,666 INFO [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion-1,2,1733153013787.046bda82651220493d064993627d2cdf. 
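[Editor's note] Each parent region flushes exactly one small cell on close (the "key is 2/cf:/…" and "key is 1/cf:/…" lines above, i.e. rows '2' and '1' with an empty qualifier in family 'cf', ~24 B of data apiece). A hedged sketch of the kind of write that would produce such cells; the values and the helper itself are assumptions inferred from the flushed keys, not the test's code:

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class WriteOneCellPerRegion {
  // Assumes an already-open Connection, as in the earlier sketches.
  static void write(Connection conn) throws Exception {
    TableName name = TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion-1");
    try (Table table = conn.getTable(name)) {
      // Row '1' falls in the ['', '2') region, row '2' in the ['2', '') region;
      // an empty qualifier matches the "1/cf:/" and "2/cf:/" keys in the flush output above.
      // The cell values here are placeholders, not the test's actual payload.
      table.put(new Put(Bytes.toBytes("1"))
          .addColumn(Bytes.toBytes("cf"), new byte[0], Bytes.toBytes("value-1")));
      table.put(new Put(Bytes.toBytes("2"))
          .addColumn(Bytes.toBytes("cf"), new byte[0], Bytes.toBytes("value-2")));
    }
  }
}
```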
2024-12-02T15:23:34,666 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] regionserver.HRegion(1676): Region close journal for 046bda82651220493d064993627d2cdf: Waiting for close lock at 1733153014609Running coprocessor pre-close hooks at 1733153014609Disabling compacts and flushes for region at 1733153014609Disabling writes for close at 1733153014609Obtaining lock to block concurrent updates at 1733153014609Preparing flush snapshotting stores in 046bda82651220493d064993627d2cdf at 1733153014609Finished memstore snapshotting testtb-testExportFileSystemStateWithMergeRegion-1,2,1733153013787.046bda82651220493d064993627d2cdf., syncing WAL and waiting on mvcc, flushsize=dataSize=24, getHeapSize=336, getOffHeapSize=0, getCellsCount=1 at 1733153014609Flushing stores of testtb-testExportFileSystemStateWithMergeRegion-1,2,1733153013787.046bda82651220493d064993627d2cdf. at 1733153014610 (+1 ms)Flushing 046bda82651220493d064993627d2cdf/cf: creating writer at 1733153014610Flushing 046bda82651220493d064993627d2cdf/cf: appending metadata at 1733153014626 (+16 ms)Flushing 046bda82651220493d064993627d2cdf/cf: closing flushed file at 1733153014626Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@cfd5fd6: reopening flushed file at 1733153014655 (+29 ms)Finished flush of dataSize ~24 B/24, heapSize ~336 B/336, currentSize=0 B/0 for 046bda82651220493d064993627d2cdf in 51ms, sequenceid=5, compaction requested=false at 1733153014660 (+5 ms)Writing region close event to WAL at 1733153014661 (+1 ms)Running coprocessor post-close hooks at 1733153014666 (+5 ms)Closed at 1733153014666 2024-12-02T15:23:34,668 INFO [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] handler.UnassignRegionHandler(157): Closed 046bda82651220493d064993627d2cdf 2024-12-02T15:23:34,669 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=150 updating hbase:meta row=046bda82651220493d064993627d2cdf, regionState=CLOSED 2024-12-02T15:23:34,670 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=152, ppid=150, state=RUNNABLE, hasLock=false; CloseRegionProcedure 046bda82651220493d064993627d2cdf, server=be0458f36726,45405,1733152773787 because future has completed 2024-12-02T15:23:34,670 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/55b8cdac8353563ac0ba4bf6d1405f16/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-12-02T15:23:34,671 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-02T15:23:34,671 INFO [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion-1,,1733153013787.55b8cdac8353563ac0ba4bf6d1405f16. 
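[Editor's note] Both parents are now closed and their UNASSIGN procedures are wrapping up; the remaining steps of pid=148 (visible below) write out the layout of the new region directory 87c318bcdd942ee5ef8fe681967ac54b. Once the merge procedure finishes, a client could confirm that the table has collapsed back to a single region spanning the full key space, roughly as in this illustrative check (not part of the test itself):

```java
import java.util.List;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.util.Bytes;

public class VerifySingleRegionAfterMerge {
  // Assumes an already-open Admin handle and that MergeTableRegionsProcedure pid=148 has finished.
  static void verify(Admin admin) throws Exception {
    TableName table = TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion-1");
    List<RegionInfo> regions = admin.getRegions(table);
    if (regions.size() != 1) {
      throw new IllegalStateException("expected one merged region, found " + regions.size());
    }
    RegionInfo merged = regions.get(0);
    // The surviving region should span the whole key space: STARTKEY='' and ENDKEY=''.
    System.out.println("merged region " + merged.getEncodedName()
        + " [" + Bytes.toStringBinary(merged.getStartKey())
        + ", " + Bytes.toStringBinary(merged.getEndKey()) + ")");
  }
}
```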
2024-12-02T15:23:34,671 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(1676): Region close journal for 55b8cdac8353563ac0ba4bf6d1405f16: Waiting for close lock at 1733153014608Running coprocessor pre-close hooks at 1733153014608Disabling compacts and flushes for region at 1733153014608Disabling writes for close at 1733153014609 (+1 ms)Obtaining lock to block concurrent updates at 1733153014609Preparing flush snapshotting stores in 55b8cdac8353563ac0ba4bf6d1405f16 at 1733153014609Finished memstore snapshotting testtb-testExportFileSystemStateWithMergeRegion-1,,1733153013787.55b8cdac8353563ac0ba4bf6d1405f16., syncing WAL and waiting on mvcc, flushsize=dataSize=24, getHeapSize=336, getOffHeapSize=0, getCellsCount=1 at 1733153014609Flushing stores of testtb-testExportFileSystemStateWithMergeRegion-1,,1733153013787.55b8cdac8353563ac0ba4bf6d1405f16. at 1733153014609Flushing 55b8cdac8353563ac0ba4bf6d1405f16/cf: creating writer at 1733153014610 (+1 ms)Flushing 55b8cdac8353563ac0ba4bf6d1405f16/cf: appending metadata at 1733153014629 (+19 ms)Flushing 55b8cdac8353563ac0ba4bf6d1405f16/cf: closing flushed file at 1733153014629Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@17bc4226: reopening flushed file at 1733153014658 (+29 ms)Finished flush of dataSize ~24 B/24, heapSize ~336 B/336, currentSize=0 B/0 for 55b8cdac8353563ac0ba4bf6d1405f16 in 56ms, sequenceid=5, compaction requested=false at 1733153014666 (+8 ms)Writing region close event to WAL at 1733153014666Running coprocessor post-close hooks at 1733153014671 (+5 ms)Closed at 1733153014671 2024-12-02T15:23:34,673 INFO [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] handler.UnassignRegionHandler(157): Closed 55b8cdac8353563ac0ba4bf6d1405f16 2024-12-02T15:23:34,673 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=152, resume processing ppid=150 2024-12-02T15:23:34,673 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=152, ppid=150, state=SUCCESS, hasLock=false; CloseRegionProcedure 046bda82651220493d064993627d2cdf, server=be0458f36726,45405,1733152773787 in 215 msec 2024-12-02T15:23:34,673 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=149 updating hbase:meta row=55b8cdac8353563ac0ba4bf6d1405f16, regionState=CLOSED 2024-12-02T15:23:34,674 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=150, ppid=148, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=046bda82651220493d064993627d2cdf, UNASSIGN in 221 msec 2024-12-02T15:23:34,675 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=151, ppid=149, state=RUNNABLE, hasLock=false; CloseRegionProcedure 55b8cdac8353563ac0ba4bf6d1405f16, server=be0458f36726,43025,1733152773677 because future has completed 2024-12-02T15:23:34,677 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=151, resume processing ppid=149 2024-12-02T15:23:34,677 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=151, ppid=149, state=SUCCESS, hasLock=false; CloseRegionProcedure 55b8cdac8353563ac0ba4bf6d1405f16, server=be0458f36726,43025,1733152773677 in 221 msec 2024-12-02T15:23:34,679 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=149, resume processing ppid=148 2024-12-02T15:23:34,679 INFO 
[PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=149, ppid=148, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=55b8cdac8353563ac0ba4bf6d1405f16, UNASSIGN in 225 msec 2024-12-02T15:23:34,690 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742165_1341 (size=84) 2024-12-02T15:23:34,690 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742165_1341 (size=84) 2024-12-02T15:23:34,690 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742165_1341 (size=84) 2024-12-02T15:23:34,692 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T15:23:34,697 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742166_1342 (size=20) 2024-12-02T15:23:34,697 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742166_1342 (size=20) 2024-12-02T15:23:34,697 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742166_1342 (size=20) 2024-12-02T15:23:34,699 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T15:23:34,704 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742167_1343 (size=21) 2024-12-02T15:23:34,704 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742167_1343 (size=21) 2024-12-02T15:23:34,704 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742167_1343 (size=21) 2024-12-02T15:23:34,710 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742168_1344 (size=84) 2024-12-02T15:23:34,710 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742168_1344 (size=84) 2024-12-02T15:23:34,710 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742168_1344 (size=84) 2024-12-02T15:23:34,711 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T15:23:34,719 DEBUG [PEWorker-5 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/87c318bcdd942ee5ef8fe681967ac54b/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=-1 2024-12-02T15:23:34,720 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Delete 
{"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1,,1733153013787.55b8cdac8353563ac0ba4bf6d1405f16.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"9223372036854775807"}]},"ts":"9223372036854775807"} 2024-12-02T15:23:34,720 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1,2,1733153013787.046bda82651220493d064993627d2cdf.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"9223372036854775807"}]},"ts":"9223372036854775807"} 2024-12-02T15:23:34,720 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Put {"totalColumns":7,"row":"testtb-testExportFileSystemStateWithMergeRegion-1,,1733153013788.87c318bcdd942ee5ef8fe681967ac54b.","families":{"info":[{"qualifier":"regioninfo","vlen":83,"tag":[],"timestamp":"9223372036854775807"},{"qualifier":"merge0000","vlen":84,"tag":[],"timestamp":"9223372036854775807"},{"qualifier":"merge0001","vlen":84,"tag":[],"timestamp":"9223372036854775807"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"9223372036854775807"}]},"ts":"9223372036854775807"} 2024-12-02T15:23:34,724 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=153, ppid=148, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=87c318bcdd942ee5ef8fe681967ac54b, ASSIGN}] 2024-12-02T15:23:34,725 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=153, ppid=148, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=87c318bcdd942ee5ef8fe681967ac54b, ASSIGN 2024-12-02T15:23:34,728 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(269): Starting pid=153, ppid=148, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=87c318bcdd942ee5ef8fe681967ac54b, ASSIGN; state=MERGED, location=be0458f36726,43025,1733152773677; forceNewPlan=false, retain=false 2024-12-02T15:23:34,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=148 2024-12-02T15:23:34,878 INFO [be0458f36726:34441 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 
2024-12-02T15:23:34,878 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=153 updating hbase:meta row=87c318bcdd942ee5ef8fe681967ac54b, regionState=OPENING, regionLocation=be0458f36726,43025,1733152773677 2024-12-02T15:23:34,880 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=153, ppid=148, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=87c318bcdd942ee5ef8fe681967ac54b, ASSIGN because future has completed 2024-12-02T15:23:34,880 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=154, ppid=153, state=RUNNABLE, hasLock=false; OpenRegionProcedure 87c318bcdd942ee5ef8fe681967ac54b, server=be0458f36726,43025,1733152773677}] 2024-12-02T15:23:35,035 INFO [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithMergeRegion-1,,1733153013788.87c318bcdd942ee5ef8fe681967ac54b. 2024-12-02T15:23:35,035 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(7752): Opening region: {ENCODED => 87c318bcdd942ee5ef8fe681967ac54b, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,,1733153013788.87c318bcdd942ee5ef8fe681967ac54b.', STARTKEY => '', ENDKEY => ''} 2024-12-02T15:23:35,036 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithMergeRegion-1,,1733153013788.87c318bcdd942ee5ef8fe681967ac54b. service=AccessControlService 2024-12-02T15:23:35,036 INFO [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
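(Editor's note, not part of the captured log: once the merged region 87c318bcdd942ee5ef8fe681967ac54b is reopened below, the test requests a FLUSH-type snapshot named snaptb0-testExportFileSystemStateWithMergeRegion-1, visible a few entries further on as a MasterRpcServices snapshot request. For orientation, a request of that shape is normally issued from the client via Admin.snapshot; this is a hedged sketch under the same assumptions as above (hbase-site.xml on the classpath), not the test's own code.)

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotType;

public class SnapshotSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // FLUSH-type snapshot of the merged table, matching the
      // "type=FLUSH ttl=0" snapshot request recorded later in this log.
      admin.snapshot("snaptb0-testExportFileSystemStateWithMergeRegion-1",
          TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion-1"),
          SnapshotType.FLUSH);
    }
  }
}

(The SnapshotProcedure entries that follow show the master walking the usual states: SNAPSHOT_PREPARE, SNAPSHOT_WRITE_SNAPSHOT_INFO, per-region SnapshotRegionProcedure, consolidation, verification, and completion.)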
2024-12-02T15:23:35,036 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithMergeRegion-1 87c318bcdd942ee5ef8fe681967ac54b 2024-12-02T15:23:35,036 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion-1,,1733153013788.87c318bcdd942ee5ef8fe681967ac54b.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T15:23:35,036 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(7794): checking encryption for 87c318bcdd942ee5ef8fe681967ac54b 2024-12-02T15:23:35,036 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(7797): checking classloading for 87c318bcdd942ee5ef8fe681967ac54b 2024-12-02T15:23:35,038 INFO [StoreOpener-87c318bcdd942ee5ef8fe681967ac54b-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 87c318bcdd942ee5ef8fe681967ac54b 2024-12-02T15:23:35,039 INFO [StoreOpener-87c318bcdd942ee5ef8fe681967ac54b-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 87c318bcdd942ee5ef8fe681967ac54b columnFamilyName cf 2024-12-02T15:23:35,039 DEBUG [StoreOpener-87c318bcdd942ee5ef8fe681967ac54b-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T15:23:35,054 DEBUG [StoreOpener-87c318bcdd942ee5ef8fe681967ac54b-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/87c318bcdd942ee5ef8fe681967ac54b/cf/13879a4e44344a7382fc2c403ac431be.046bda82651220493d064993627d2cdf->hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/046bda82651220493d064993627d2cdf/cf/13879a4e44344a7382fc2c403ac431be-top 2024-12-02T15:23:35,059 DEBUG [StoreOpener-87c318bcdd942ee5ef8fe681967ac54b-1 {}] regionserver.StoreEngine(278): loaded 
hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/87c318bcdd942ee5ef8fe681967ac54b/cf/27ed86fe7e6f42f2b6c6510e3f918f28.55b8cdac8353563ac0ba4bf6d1405f16->hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/55b8cdac8353563ac0ba4bf6d1405f16/cf/27ed86fe7e6f42f2b6c6510e3f918f28-top 2024-12-02T15:23:35,060 INFO [StoreOpener-87c318bcdd942ee5ef8fe681967ac54b-1 {}] regionserver.HStore(327): Store=87c318bcdd942ee5ef8fe681967ac54b/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T15:23:35,060 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(1038): replaying wal for 87c318bcdd942ee5ef8fe681967ac54b 2024-12-02T15:23:35,063 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/87c318bcdd942ee5ef8fe681967ac54b 2024-12-02T15:23:35,068 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/87c318bcdd942ee5ef8fe681967ac54b 2024-12-02T15:23:35,068 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(1048): stopping wal replay for 87c318bcdd942ee5ef8fe681967ac54b 2024-12-02T15:23:35,068 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(1060): Cleaning up temporary data for 87c318bcdd942ee5ef8fe681967ac54b 2024-12-02T15:23:35,070 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(1093): writing seq id for 87c318bcdd942ee5ef8fe681967ac54b 2024-12-02T15:23:35,071 INFO [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(1114): Opened 87c318bcdd942ee5ef8fe681967ac54b; next sequenceid=9; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=59537154, jitterRate=-0.11282727122306824}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-02T15:23:35,071 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 87c318bcdd942ee5ef8fe681967ac54b 2024-12-02T15:23:35,072 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(1006): Region open journal for 87c318bcdd942ee5ef8fe681967ac54b: Running coprocessor pre-open hook at 1733153015036Writing region info on filesystem at 1733153015037 (+1 ms)Initializing all the Stores at 1733153015037Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 
'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733153015037Cleaning up temporary data from old regions at 1733153015068 (+31 ms)Running coprocessor post-open hooks at 1733153015071 (+3 ms)Region opened successfully at 1733153015072 (+1 ms) 2024-12-02T15:23:35,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=148 2024-12-02T15:23:35,073 INFO [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemStateWithMergeRegion-1,,1733153013788.87c318bcdd942ee5ef8fe681967ac54b., pid=154, masterSystemTime=1733153015032 2024-12-02T15:23:35,074 INFO [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.CompactSplit(342): Ignoring compaction request for testtb-testExportFileSystemStateWithMergeRegion-1,,1733153013788.87c318bcdd942ee5ef8fe681967ac54b.,because compaction is disabled. 2024-12-02T15:23:35,081 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithMergeRegion-1,,1733153013788.87c318bcdd942ee5ef8fe681967ac54b. 2024-12-02T15:23:35,081 INFO [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithMergeRegion-1,,1733153013788.87c318bcdd942ee5ef8fe681967ac54b. 2024-12-02T15:23:35,082 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=153 updating hbase:meta row=87c318bcdd942ee5ef8fe681967ac54b, regionState=OPEN, openSeqNum=9, regionLocation=be0458f36726,43025,1733152773677 2024-12-02T15:23:35,085 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=154, ppid=153, state=RUNNABLE, hasLock=false; OpenRegionProcedure 87c318bcdd942ee5ef8fe681967ac54b, server=be0458f36726,43025,1733152773677 because future has completed 2024-12-02T15:23:35,092 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=154, resume processing ppid=153 2024-12-02T15:23:35,092 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=154, ppid=153, state=SUCCESS, hasLock=false; OpenRegionProcedure 87c318bcdd942ee5ef8fe681967ac54b, server=be0458f36726,43025,1733152773677 in 207 msec 2024-12-02T15:23:35,095 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=153, resume processing ppid=148 2024-12-02T15:23:35,095 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=153, ppid=148, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=87c318bcdd942ee5ef8fe681967ac54b, ASSIGN in 368 msec 2024-12-02T15:23:35,101 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=148, state=SUCCESS, hasLock=false; MergeTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, regions=[55b8cdac8353563ac0ba4bf6d1405f16, 046bda82651220493d064993627d2cdf], force=true in 656 msec 2024-12-02T15:23:35,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=148 2024-12-02T15:23:35,583 INFO [RPCClient-NioEventLoopGroup-6-4 {}] 
client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: MERGE_REGIONS, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion-1 completed 2024-12-02T15:23:35,584 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } 2024-12-02T15:23:35,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733153015584 (current time:1733153015584). 2024-12-02T15:23:35,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-02T15:23:35,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportFileSystemStateWithMergeRegion-1 VERSION not specified, setting to 2 2024-12-02T15:23:35,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-02T15:23:35,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@42946d31, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T15:23:35,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] client.ClusterIdFetcher(90): Going to request be0458f36726,34441,-1 for getting cluster id 2024-12-02T15:23:35,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-02T15:23:35,587 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '8e50677f-f1c7-447a-8806-ffbc6b162076' 2024-12-02T15:23:35,587 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-02T15:23:35,587 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "8e50677f-f1c7-447a-8806-ffbc6b162076" 2024-12-02T15:23:35,587 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@50862c18, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T15:23:35,587 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [be0458f36726,34441,-1] 2024-12-02T15:23:35,588 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-02T15:23:35,588 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T15:23:35,589 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38902, 
version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-02T15:23:35,590 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@b6a2fba, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T15:23:35,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-02T15:23:35,592 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=be0458f36726,45405,1733152773787, seqNum=-1] 2024-12-02T15:23:35,592 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T15:23:35,593 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41030, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T15:23:35,594 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441. 2024-12-02T15:23:35,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-02T15:23:35,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T15:23:35,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T15:23:35,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3ae884c6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T15:23:35,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] 
client.ClusterIdFetcher(90): Going to request be0458f36726,34441,-1 for getting cluster id 2024-12-02T15:23:35,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-02T15:23:35,597 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-02T15:23:35,597 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '8e50677f-f1c7-447a-8806-ffbc6b162076' 2024-12-02T15:23:35,598 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-02T15:23:35,598 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "8e50677f-f1c7-447a-8806-ffbc6b162076" 2024-12-02T15:23:35,598 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4de43f76, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T15:23:35,598 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [be0458f36726,34441,-1] 2024-12-02T15:23:35,598 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-02T15:23:35,599 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T15:23:35,599 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38916, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-02T15:23:35,600 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6c12b0b7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T15:23:35,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-02T15:23:35,602 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=be0458f36726,45405,1733152773787, seqNum=-1] 2024-12-02T15:23:35,602 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T15:23:35,604 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41046, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T15:23:35,605 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemStateWithMergeRegion-1', locateType=CURRENT is [region=hbase:acl,,1733152776657.82c95700d9ff2efea2507bd84223c4b0., 
hostname=be0458f36726,43025,1733152773677, seqNum=2] 2024-12-02T15:23:35,606 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T15:23:35,607 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51528, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T15:23:35,608 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441. 2024-12-02T15:23:35,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor255.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-02T15:23:35,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T15:23:35,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T15:23:35,608 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithMergeRegion-1], kv [jenkins: RWXCA] 2024-12-02T15:23:35,608 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-02T15:23:35,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-12-02T15:23:35,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] procedure2.ProcedureExecutor(1139): Stored pid=155, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=155, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } 2024-12-02T15:23:35,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 }, snapshot procedure id = 155 2024-12-02T15:23:35,611 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=155, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=155, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-02T15:23:35,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=155 2024-12-02T15:23:35,612 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=155, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=155, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-02T15:23:35,615 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=155, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=155, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-02T15:23:35,632 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742169_1345 (size=216) 2024-12-02T15:23:35,632 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742169_1345 (size=216) 2024-12-02T15:23:35,637 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742169_1345 (size=216) 2024-12-02T15:23:35,639 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=155, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=155, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-02T15:23:35,640 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): 
Initialized subprocedures=[{pid=156, ppid=155, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 87c318bcdd942ee5ef8fe681967ac54b}] 2024-12-02T15:23:35,641 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=156, ppid=155, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 87c318bcdd942ee5ef8fe681967ac54b 2024-12-02T15:23:35,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=155 2024-12-02T15:23:35,777 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733152780087_0006_000001 (auth:SIMPLE) from 127.0.0.1:42402 2024-12-02T15:23:35,792 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43025 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=156 2024-12-02T15:23:35,792 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=156}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithMergeRegion-1,,1733153013788.87c318bcdd942ee5ef8fe681967ac54b. 2024-12-02T15:23:35,793 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=156}] regionserver.HRegion(2603): Flush status journal for 87c318bcdd942ee5ef8fe681967ac54b: 2024-12-02T15:23:35,793 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=156}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithMergeRegion-1,,1733153013788.87c318bcdd942ee5ef8fe681967ac54b. for snaptb0-testExportFileSystemStateWithMergeRegion-1 completed. 2024-12-02T15:23:35,793 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=156}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithMergeRegion-1,,1733153013788.87c318bcdd942ee5ef8fe681967ac54b.' 
region-info for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-02T15:23:35,793 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=156}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-02T15:23:35,793 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=156}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/87c318bcdd942ee5ef8fe681967ac54b/cf/13879a4e44344a7382fc2c403ac431be.046bda82651220493d064993627d2cdf->hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/046bda82651220493d064993627d2cdf/cf/13879a4e44344a7382fc2c403ac431be-top, hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/87c318bcdd942ee5ef8fe681967ac54b/cf/27ed86fe7e6f42f2b6c6510e3f918f28.55b8cdac8353563ac0ba4bf6d1405f16->hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/55b8cdac8353563ac0ba4bf6d1405f16/cf/27ed86fe7e6f42f2b6c6510e3f918f28-top] hfiles 2024-12-02T15:23:35,793 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=156}] snapshot.SnapshotManifest(265): Adding reference for file (1/2): hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/87c318bcdd942ee5ef8fe681967ac54b/cf/13879a4e44344a7382fc2c403ac431be.046bda82651220493d064993627d2cdf for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-02T15:23:35,794 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=156}] snapshot.SnapshotManifest(265): Adding reference for file (2/2): hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/87c318bcdd942ee5ef8fe681967ac54b/cf/27ed86fe7e6f42f2b6c6510e3f918f28.55b8cdac8353563ac0ba4bf6d1405f16 for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-02T15:23:35,802 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_126270/yarn-6841854118/MiniMRCluster_126270-localDir-nm-1_3/usercache/jenkins/appcache/application_1733152780087_0006/container_1733152780087_0006_01_000001/launch_container.sh] 2024-12-02T15:23:35,802 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_126270/yarn-6841854118/MiniMRCluster_126270-localDir-nm-1_3/usercache/jenkins/appcache/application_1733152780087_0006/container_1733152780087_0006_01_000001/container_tokens] 2024-12-02T15:23:35,802 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_126270/yarn-6841854118/MiniMRCluster_126270-localDir-nm-1_3/usercache/jenkins/appcache/application_1733152780087_0006/container_1733152780087_0006_01_000001/sysfs] 2024-12-02T15:23:35,812 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742170_1346 (size=269) 2024-12-02T15:23:35,812 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742170_1346 (size=269) 2024-12-02T15:23:35,813 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742170_1346 (size=269) 2024-12-02T15:23:35,813 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=156}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithMergeRegion-1,,1733153013788.87c318bcdd942ee5ef8fe681967ac54b. 2024-12-02T15:23:35,813 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=156}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=156 2024-12-02T15:23:35,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.HMaster(4169): Remote procedure done, pid=156 2024-12-02T15:23:35,814 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemStateWithMergeRegion-1 on region 87c318bcdd942ee5ef8fe681967ac54b 2024-12-02T15:23:35,814 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=156, ppid=155, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 87c318bcdd942ee5ef8fe681967ac54b 2024-12-02T15:23:35,818 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=156, resume processing ppid=155 2024-12-02T15:23:35,818 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=156, ppid=155, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 87c318bcdd942ee5ef8fe681967ac54b in 175 msec 2024-12-02T15:23:35,818 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=155, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=155, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-02T15:23:35,819 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=155, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=155, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-02T15:23:35,820 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=155, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=155, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-02T15:23:35,820 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifest(495): Convert to Single 
Snapshot Manifest for snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-02T15:23:35,821 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-02T15:23:35,839 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742171_1347 (size=670) 2024-12-02T15:23:35,839 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742171_1347 (size=670) 2024-12-02T15:23:35,840 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742171_1347 (size=670) 2024-12-02T15:23:35,842 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=155, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=155, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-02T15:23:35,850 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=155, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=155, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-02T15:23:35,850 DEBUG [PEWorker-2 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithMergeRegion-1 to hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-02T15:23:35,853 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=155, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=155, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-02T15:23:35,853 DEBUG [PEWorker-2 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 }, snapshot procedure id = 155 2024-12-02T15:23:35,854 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=155, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=155, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } in 244 msec 2024-12-02T15:23:35,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=155 2024-12-02T15:23:35,923 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion-1 completed 2024-12-02T15:23:35,924 INFO [Time-limited test {}] 
snapshot.TestExportSnapshot(515): HDFS export destination path: hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/export-test/export-1733153015923 2024-12-02T15:23:35,924 INFO [Time-limited test {}] snapshot.TestExportSnapshot(542): tgtFsUri=hdfs://localhost:36329, tgtDir=hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/export-test/export-1733153015923, rawTgtDir=hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/export-test/export-1733153015923, srcFsUri=hdfs://localhost:36329, srcDir=hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657 2024-12-02T15:23:35,952 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1094): inputFs=hdfs://localhost:36329, inputRoot=hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657 2024-12-02T15:23:35,952 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1095): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-205930424_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/export-test/export-1733153015923, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/export-test/export-1733153015923/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-02T15:23:35,954 INFO [Time-limited test {}] snapshot.ExportSnapshot(1104): Verify the source snapshot's expiration status and integrity. 2024-12-02T15:23:35,960 INFO [Time-limited test {}] snapshot.ExportSnapshot(1162): Copy Snapshot Manifest from hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1 to hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/export-test/export-1733153015923/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-02T15:23:35,979 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742173_1349 (size=216) 2024-12-02T15:23:35,979 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742172_1348 (size=670) 2024-12-02T15:23:35,979 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742173_1349 (size=216) 2024-12-02T15:23:35,979 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742172_1348 (size=670) 2024-12-02T15:23:35,979 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742173_1349 (size=216) 2024-12-02T15:23:35,980 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742172_1348 (size=670) 2024-12-02T15:23:35,981 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-common/target/hbase-common-3.0.0-beta-2-SNAPSHOT.jar 2024-12-02T15:23:35,982 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-protocol-shaded/target/hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar 2024-12-02T15:23:35,982 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-client/target/hbase-client-3.0.0-beta-2-SNAPSHOT.jar 2024-12-02T15:23:36,968 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/44e720fa-d04f-d823-6564-55fab94a4e60/hadoop-1277166441048131310.jar 2024-12-02T15:23:36,969 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-hadoop-compat/target/hbase-hadoop-compat-3.0.0-beta-2-SNAPSHOT.jar 2024-12-02T15:23:36,969 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-hadoop-compat/target/hbase-hadoop-compat-3.0.0-beta-2-SNAPSHOT.jar 2024-12-02T15:23:37,039 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/44e720fa-d04f-d823-6564-55fab94a4e60/hadoop-15731999789288901531.jar 2024-12-02T15:23:37,040 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-metrics/target/hbase-metrics-3.0.0-beta-2-SNAPSHOT.jar 2024-12-02T15:23:37,040 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-metrics-api/target/hbase-metrics-api-3.0.0-beta-2-SNAPSHOT.jar 2024-12-02T15:23:37,040 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-replication/target/hbase-replication-3.0.0-beta-2-SNAPSHOT.jar 2024-12-02T15:23:37,041 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-http/target/hbase-http-3.0.0-beta-2-SNAPSHOT.jar 2024-12-02T15:23:37,041 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-procedure/target/hbase-procedure-3.0.0-beta-2-SNAPSHOT.jar 2024-12-02T15:23:37,041 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-zookeeper/target/hbase-zookeeper-3.0.0-beta-2-SNAPSHOT.jar 2024-12-02T15:23:37,041 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-02T15:23:37,042 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-02T15:23:37,042 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-02T15:23:37,042 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-02T15:23:37,042 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-02T15:23:37,042 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-02T15:23:37,042 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-02T15:23:37,043 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-02T15:23:37,043 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-02T15:23:37,043 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-02T15:23:37,043 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-02T15:23:37,043 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-02T15:23:37,044 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-02T15:23:37,044 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-02T15:23:37,044 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-02T15:23:37,044 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-02T15:23:37,044 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-02T15:23:37,044 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-02T15:23:37,096 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742174_1350 (size=24020) 2024-12-02T15:23:37,096 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742174_1350 (size=24020) 2024-12-02T15:23:37,096 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742174_1350 (size=24020) 2024-12-02T15:23:37,103 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742175_1351 (size=77755) 2024-12-02T15:23:37,103 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742175_1351 (size=77755) 2024-12-02T15:23:37,103 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742175_1351 (size=77755) 2024-12-02T15:23:37,114 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742176_1352 (size=131360) 2024-12-02T15:23:37,114 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742176_1352 (size=131360) 2024-12-02T15:23:37,114 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is 
added to blk_1073742176_1352 (size=131360) 2024-12-02T15:23:37,153 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742177_1353 (size=111793) 2024-12-02T15:23:37,153 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742177_1353 (size=111793) 2024-12-02T15:23:37,154 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742177_1353 (size=111793) 2024-12-02T15:23:37,187 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742178_1354 (size=6424745) 2024-12-02T15:23:37,187 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742178_1354 (size=6424745) 2024-12-02T15:23:37,188 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742178_1354 (size=6424745) 2024-12-02T15:23:37,204 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742179_1355 (size=1832290) 2024-12-02T15:23:37,205 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742179_1355 (size=1832290) 2024-12-02T15:23:37,205 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742179_1355 (size=1832290) 2024-12-02T15:23:37,242 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742180_1356 (size=8360005) 2024-12-02T15:23:37,242 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742180_1356 (size=8360005) 2024-12-02T15:23:37,242 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742180_1356 (size=8360005) 2024-12-02T15:23:37,250 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742181_1357 (size=503880) 2024-12-02T15:23:37,250 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742181_1357 (size=503880) 2024-12-02T15:23:37,251 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742181_1357 (size=503880) 2024-12-02T15:23:37,258 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742182_1358 (size=322274) 2024-12-02T15:23:37,258 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742182_1358 (size=322274) 2024-12-02T15:23:37,258 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742182_1358 (size=322274) 2024-12-02T15:23:37,273 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742183_1359 (size=20406) 2024-12-02T15:23:37,273 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:45623 is added to blk_1073742183_1359 (size=20406) 2024-12-02T15:23:37,274 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742183_1359 (size=20406) 2024-12-02T15:23:37,295 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742184_1360 (size=45609) 2024-12-02T15:23:37,295 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742184_1360 (size=45609) 2024-12-02T15:23:37,295 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742184_1360 (size=45609) 2024-12-02T15:23:37,306 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742185_1361 (size=136454) 2024-12-02T15:23:37,306 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742185_1361 (size=136454) 2024-12-02T15:23:37,307 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742185_1361 (size=136454) 2024-12-02T15:23:37,323 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742186_1362 (size=1597136) 2024-12-02T15:23:37,323 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742186_1362 (size=1597136) 2024-12-02T15:23:37,323 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742186_1362 (size=1597136) 2024-12-02T15:23:37,340 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742187_1363 (size=30873) 2024-12-02T15:23:37,340 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742187_1363 (size=30873) 2024-12-02T15:23:37,341 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742187_1363 (size=30873) 2024-12-02T15:23:37,347 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742188_1364 (size=29229) 2024-12-02T15:23:37,348 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742188_1364 (size=29229) 2024-12-02T15:23:37,348 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742188_1364 (size=29229) 2024-12-02T15:23:37,356 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742189_1365 (size=903849) 2024-12-02T15:23:37,356 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742189_1365 (size=903849) 2024-12-02T15:23:37,357 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742189_1365 (size=903849) 2024-12-02T15:23:37,365 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:45623 is added to blk_1073742190_1366 (size=443171) 2024-12-02T15:23:37,365 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742190_1366 (size=443171) 2024-12-02T15:23:37,365 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742190_1366 (size=443171) 2024-12-02T15:23:37,381 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742191_1367 (size=5175431) 2024-12-02T15:23:37,381 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742191_1367 (size=5175431) 2024-12-02T15:23:37,382 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742191_1367 (size=5175431) 2024-12-02T15:23:37,389 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742192_1368 (size=232881) 2024-12-02T15:23:37,389 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742192_1368 (size=232881) 2024-12-02T15:23:37,390 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742192_1368 (size=232881) 2024-12-02T15:23:37,391 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-02T15:23:37,405 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742193_1369 (size=1323991) 2024-12-02T15:23:37,405 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742193_1369 (size=1323991) 2024-12-02T15:23:37,406 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742193_1369 (size=1323991) 2024-12-02T15:23:37,430 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742194_1370 (size=4695811) 2024-12-02T15:23:37,430 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742194_1370 (size=4695811) 2024-12-02T15:23:37,430 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742194_1370 (size=4695811) 2024-12-02T15:23:37,464 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742195_1371 (size=1877034) 2024-12-02T15:23:37,464 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742195_1371 (size=1877034) 2024-12-02T15:23:37,465 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742195_1371 (size=1877034) 2024-12-02T15:23:37,486 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742196_1372 (size=217555) 2024-12-02T15:23:37,486 INFO [Block report processor {}] blockmanagement.BlockManager(3777): 
BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742196_1372 (size=217555) 2024-12-02T15:23:37,487 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742196_1372 (size=217555) 2024-12-02T15:23:37,523 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742197_1373 (size=4188619) 2024-12-02T15:23:37,524 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742197_1373 (size=4188619) 2024-12-02T15:23:37,524 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742197_1373 (size=4188619) 2024-12-02T15:23:37,534 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742198_1374 (size=127628) 2024-12-02T15:23:37,534 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742198_1374 (size=127628) 2024-12-02T15:23:37,535 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742198_1374 (size=127628) 2024-12-02T15:23:37,540 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 2024-12-02T15:23:37,542 INFO [Time-limited test {}] snapshot.ExportSnapshot(663): Loading Snapshot 'snaptb0-testExportFileSystemStateWithMergeRegion-1' hfile list 2024-12-02T15:23:37,544 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=0 size=4.8 K 2024-12-02T15:23:37,544 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=1 size=4.8 K 2024-12-02T15:23:37,554 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742199_1375 (size=481) 2024-12-02T15:23:37,555 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742199_1375 (size=481) 2024-12-02T15:23:37,555 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742199_1375 (size=481) 2024-12-02T15:23:37,561 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742200_1376 (size=21) 2024-12-02T15:23:37,561 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742200_1376 (size=21) 2024-12-02T15:23:37,561 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742200_1376 (size=21) 2024-12-02T15:23:37,576 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742201_1377 (size=304140) 2024-12-02T15:23:37,576 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742201_1377 (size=304140) 2024-12-02T15:23:37,576 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742201_1377 (size=304140) 2024-12-02T15:23:37,597 WARN [SchedulerEventDispatcher:Event Processor {}] 
capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-02T15:23:37,597 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-02T15:23:37,664 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733152780087_0007_000001 (auth:SIMPLE) from 127.0.0.1:33312 2024-12-02T15:23:37,776 DEBUG [master/be0458f36726:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 1829701d319630807ac0982962b00b1b changed from -1.0 to 0.0, refreshing cache 2024-12-02T15:23:37,776 DEBUG [master/be0458f36726:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region ebd17761b06365d94a6efc9193e92c1d changed from -1.0 to 0.0, refreshing cache 2024-12-02T15:23:42,562 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733152780087_0007_000001 (auth:SIMPLE) from 127.0.0.1:46584 2024-12-02T15:23:42,985 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742202_1378 (size=349838) 2024-12-02T15:23:42,985 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742202_1378 (size=349838) 2024-12-02T15:23:42,986 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742202_1378 (size=349838) 2024-12-02T15:23:43,093 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-02T15:23:43,093 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithMergeRegion-1 Metrics about Tables on a single HBase RegionServer 2024-12-02T15:23:44,807 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733152780087_0007_000001 (auth:SIMPLE) from 127.0.0.1:39752 2024-12-02T15:23:44,807 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733152780087_0007_000001 (auth:SIMPLE) from 127.0.0.1:57994 2024-12-02T15:23:48,017 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742203_1379 (size=4945) 2024-12-02T15:23:48,017 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742203_1379 (size=4945) 2024-12-02T15:23:48,017 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742203_1379 (size=4945) 2024-12-02T15:23:48,138 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_126270/yarn-6841854118/MiniMRCluster_126270-localDir-nm-0_1/usercache/jenkins/appcache/application_1733152780087_0007/container_1733152780087_0007_01_000002/launch_container.sh] 2024-12-02T15:23:48,138 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_126270/yarn-6841854118/MiniMRCluster_126270-localDir-nm-0_1/usercache/jenkins/appcache/application_1733152780087_0007/container_1733152780087_0007_01_000002/container_tokens] 2024-12-02T15:23:48,138 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_126270/yarn-6841854118/MiniMRCluster_126270-localDir-nm-0_1/usercache/jenkins/appcache/application_1733152780087_0007/container_1733152780087_0007_01_000002/sysfs] 2024-12-02T15:23:48,627 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742205_1381 (size=4945) 2024-12-02T15:23:48,627 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742205_1381 (size=4945) 2024-12-02T15:23:48,627 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742205_1381 (size=4945) 2024-12-02T15:23:48,675 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742204_1380 (size=22235) 2024-12-02T15:23:48,676 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742204_1380 (size=22235) 2024-12-02T15:23:48,677 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742204_1380 (size=22235) 2024-12-02T15:23:48,687 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742206_1382 (size=482) 2024-12-02T15:23:48,688 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742206_1382 (size=482) 2024-12-02T15:23:48,688 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742206_1382 (size=482) 2024-12-02T15:23:48,718 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742207_1383 (size=22235) 2024-12-02T15:23:48,718 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742207_1383 (size=22235) 2024-12-02T15:23:48,719 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742207_1383 (size=22235) 2024-12-02T15:23:48,733 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742208_1384 (size=349838) 2024-12-02T15:23:48,733 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to 
blk_1073742208_1384 (size=349838) 2024-12-02T15:23:48,734 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742208_1384 (size=349838) 2024-12-02T15:23:48,751 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733152780087_0007_000001 (auth:SIMPLE) from 127.0.0.1:39758 2024-12-02T15:23:48,762 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(360): Exit code from container container_1733152780087_0007_01_000003 is : 143 2024-12-02T15:23:48,775 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_126270/yarn-6841854118/MiniMRCluster_126270-localDir-nm-1_1/usercache/jenkins/appcache/application_1733152780087_0007/container_1733152780087_0007_01_000003/launch_container.sh] 2024-12-02T15:23:48,775 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_126270/yarn-6841854118/MiniMRCluster_126270-localDir-nm-1_1/usercache/jenkins/appcache/application_1733152780087_0007/container_1733152780087_0007_01_000003/container_tokens] 2024-12-02T15:23:48,775 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_126270/yarn-6841854118/MiniMRCluster_126270-localDir-nm-1_1/usercache/jenkins/appcache/application_1733152780087_0007/container_1733152780087_0007_01_000003/sysfs] 2024-12-02T15:23:49,813 INFO [Time-limited test {}] snapshot.ExportSnapshot(1219): Finalize the Snapshot Export 2024-12-02T15:23:49,814 INFO [Time-limited test {}] snapshot.ExportSnapshot(1230): Verify the exported snapshot's expiration status and integrity. 
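[Editor's note] The entries above record ExportSnapshot finalizing and verifying the exported snapshot. For reference, a job like this is normally driven through the ExportSnapshot tool; the sketch below is a minimal, assumed invocation built from the snapshot name and destination URI seen earlier in this log, using the tool's standard --snapshot/--copy-to options. It is illustrative only and not necessarily how TestExportSnapshot itself launches the job.

```java
// Illustrative sketch only: launching a snapshot export like the one logged above.
// Snapshot name and destination are taken from this log; flag names follow the
// standard ExportSnapshot tool options (assumption: this mirrors, not reproduces,
// the test harness invocation).
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public class ExportSnapshotSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // The source cluster comes from the configuration; only the target is passed.
    int exitCode = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
        "--snapshot", "snaptb0-testExportFileSystemStateWithMergeRegion-1",
        "--copy-to", "hdfs://localhost:36329/user/jenkins/test-data/"
            + "bc2fa6fc-f566-7dd3-66cd-44578fc4e657/export-test/export-1733153015923"
    });
    System.exit(exitCode);
  }
}
```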
2024-12-02T15:23:49,819 INFO [Time-limited test {}] snapshot.ExportSnapshot(1236): Export Completed: snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-02T15:23:49,820 INFO [Time-limited test {}] snapshot.TestExportSnapshot(409): Exported snapshot 2024-12-02T15:23:49,820 INFO [Time-limited test {}] snapshot.TestExportSnapshot(420): Verified filesystem state 2024-12-02T15:23:49,820 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-205930424_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1 at hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-02T15:23:49,820 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1/.snapshotinfo 2024-12-02T15:23:49,820 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1/data.manifest 2024-12-02T15:23:49,820 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-205930424_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/export-test/export-1733153015923/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1 at hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/export-test/export-1733153015923/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-02T15:23:49,821 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/export-test/export-1733153015923/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1/.snapshotinfo 2024-12-02T15:23:49,821 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/export-test/export-1733153015923/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1/data.manifest 2024-12-02T15:23:49,826 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-02T15:23:49,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] procedure2.ProcedureExecutor(1139): Stored pid=157, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-02T15:23:49,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=157 2024-12-02T15:23:49,829 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733153029828"}]},"ts":"1733153029828"} 2024-12-02T15:23:49,830 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated 
tableName=testtb-testExportFileSystemStateWithMergeRegion-1, state=DISABLING in hbase:meta 2024-12-02T15:23:49,830 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(284): Set testtb-testExportFileSystemStateWithMergeRegion-1 to state=DISABLING 2024-12-02T15:23:49,831 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=158, ppid=157, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1}] 2024-12-02T15:23:49,832 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=159, ppid=158, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=87c318bcdd942ee5ef8fe681967ac54b, UNASSIGN}] 2024-12-02T15:23:49,833 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=159, ppid=158, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=87c318bcdd942ee5ef8fe681967ac54b, UNASSIGN 2024-12-02T15:23:49,834 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=159 updating hbase:meta row=87c318bcdd942ee5ef8fe681967ac54b, regionState=CLOSING, regionLocation=be0458f36726,43025,1733152773677 2024-12-02T15:23:49,836 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=159, ppid=158, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=87c318bcdd942ee5ef8fe681967ac54b, UNASSIGN because future has completed 2024-12-02T15:23:49,836 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-02T15:23:49,836 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=160, ppid=159, state=RUNNABLE, hasLock=false; CloseRegionProcedure 87c318bcdd942ee5ef8fe681967ac54b, server=be0458f36726,43025,1733152773677}] 2024-12-02T15:23:49,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=157 2024-12-02T15:23:49,989 INFO [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] handler.UnassignRegionHandler(122): Close 87c318bcdd942ee5ef8fe681967ac54b 2024-12-02T15:23:49,990 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-02T15:23:49,990 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] regionserver.HRegion(1722): Closing 87c318bcdd942ee5ef8fe681967ac54b, disabling compactions & flushes 2024-12-02T15:23:49,991 INFO [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion-1,,1733153013788.87c318bcdd942ee5ef8fe681967ac54b. 2024-12-02T15:23:49,991 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion-1,,1733153013788.87c318bcdd942ee5ef8fe681967ac54b. 
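[Editor's note] The DisableTableProcedure activity around this point, and the DeleteTableProcedure that appears later in the log, are triggered by ordinary client Admin calls. The sketch below is a hedged reconstruction of those calls using the table name from the log; it is not the test's actual code, and the connection setup is assumed/minimal.

```java
// Illustrative sketch only: client-side calls corresponding to the "disable"
// request above and the "delete" request seen later in this log.
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DropTableSketch {
  public static void main(String[] args) throws Exception {
    TableName table =
        TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion-1");
    try (Connection conn =
             ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // The synchronous Admin calls wait for the corresponding master
      // procedures (DisableTableProcedure / DeleteTableProcedure) to finish.
      admin.disableTable(table);
      admin.deleteTable(table);
    }
  }
}
```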
2024-12-02T15:23:49,991 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion-1,,1733153013788.87c318bcdd942ee5ef8fe681967ac54b. after waiting 0 ms 2024-12-02T15:23:49,991 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion-1,,1733153013788.87c318bcdd942ee5ef8fe681967ac54b. 2024-12-02T15:23:50,000 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/87c318bcdd942ee5ef8fe681967ac54b/recovered.edits/12.seqid, newMaxSeqId=12, maxSeqId=8 2024-12-02T15:23:50,001 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-02T15:23:50,001 INFO [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion-1,,1733153013788.87c318bcdd942ee5ef8fe681967ac54b. 2024-12-02T15:23:50,001 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] regionserver.HRegion(1676): Region close journal for 87c318bcdd942ee5ef8fe681967ac54b: Waiting for close lock at 1733153029990Running coprocessor pre-close hooks at 1733153029990Disabling compacts and flushes for region at 1733153029990Disabling writes for close at 1733153029991 (+1 ms)Writing region close event to WAL at 1733153029993 (+2 ms)Running coprocessor post-close hooks at 1733153030001 (+8 ms)Closed at 1733153030001 2024-12-02T15:23:50,004 INFO [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] handler.UnassignRegionHandler(157): Closed 87c318bcdd942ee5ef8fe681967ac54b 2024-12-02T15:23:50,005 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=159 updating hbase:meta row=87c318bcdd942ee5ef8fe681967ac54b, regionState=CLOSED 2024-12-02T15:23:50,007 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=160, ppid=159, state=RUNNABLE, hasLock=false; CloseRegionProcedure 87c318bcdd942ee5ef8fe681967ac54b, server=be0458f36726,43025,1733152773677 because future has completed 2024-12-02T15:23:50,011 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=160, resume processing ppid=159 2024-12-02T15:23:50,011 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=160, ppid=159, state=SUCCESS, hasLock=false; CloseRegionProcedure 87c318bcdd942ee5ef8fe681967ac54b, server=be0458f36726,43025,1733152773677 in 172 msec 2024-12-02T15:23:50,013 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=159, resume processing ppid=158 2024-12-02T15:23:50,013 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=159, ppid=158, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=87c318bcdd942ee5ef8fe681967ac54b, UNASSIGN in 179 msec 2024-12-02T15:23:50,017 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished 
subprocedure pid=158, resume processing ppid=157 2024-12-02T15:23:50,017 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=158, ppid=157, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 in 183 msec 2024-12-02T15:23:50,018 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733153030018"}]},"ts":"1733153030018"} 2024-12-02T15:23:50,020 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion-1, state=DISABLED in hbase:meta 2024-12-02T15:23:50,020 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(296): Set testtb-testExportFileSystemStateWithMergeRegion-1 to state=DISABLED 2024-12-02T15:23:50,022 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=157, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 in 195 msec 2024-12-02T15:23:50,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=157 2024-12-02T15:23:50,144 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion-1 completed 2024-12-02T15:23:50,146 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-02T15:23:50,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] procedure2.ProcedureExecutor(1139): Stored pid=161, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-02T15:23:50,151 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=161, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-02T15:23:50,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-02T15:23:50,153 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=161, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-02T15:23:50,156 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43025 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-02T15:23:50,156 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/87c318bcdd942ee5ef8fe681967ac54b 2024-12-02T15:23:50,156 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/55b8cdac8353563ac0ba4bf6d1405f16 2024-12-02T15:23:50,156 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(131): ARCHIVING 
hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/046bda82651220493d064993627d2cdf 2024-12-02T15:23:50,158 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/046bda82651220493d064993627d2cdf/cf, FileablePath, hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/046bda82651220493d064993627d2cdf/recovered.edits] 2024-12-02T15:23:50,158 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/55b8cdac8353563ac0ba4bf6d1405f16/cf, FileablePath, hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/55b8cdac8353563ac0ba4bf6d1405f16/recovered.edits] 2024-12-02T15:23:50,158 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/87c318bcdd942ee5ef8fe681967ac54b/cf, FileablePath, hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/87c318bcdd942ee5ef8fe681967ac54b/recovered.edits] 2024-12-02T15:23:50,165 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/87c318bcdd942ee5ef8fe681967ac54b/cf/13879a4e44344a7382fc2c403ac431be.046bda82651220493d064993627d2cdf to hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/87c318bcdd942ee5ef8fe681967ac54b/cf/13879a4e44344a7382fc2c403ac431be.046bda82651220493d064993627d2cdf 2024-12-02T15:23:50,165 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/55b8cdac8353563ac0ba4bf6d1405f16/cf/27ed86fe7e6f42f2b6c6510e3f918f28 to hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/55b8cdac8353563ac0ba4bf6d1405f16/cf/27ed86fe7e6f42f2b6c6510e3f918f28 2024-12-02T15:23:50,165 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/046bda82651220493d064993627d2cdf/cf/13879a4e44344a7382fc2c403ac431be to hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/046bda82651220493d064993627d2cdf/cf/13879a4e44344a7382fc2c403ac431be 2024-12-02T15:23:50,166 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/87c318bcdd942ee5ef8fe681967ac54b/cf/27ed86fe7e6f42f2b6c6510e3f918f28.55b8cdac8353563ac0ba4bf6d1405f16 to hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/87c318bcdd942ee5ef8fe681967ac54b/cf/27ed86fe7e6f42f2b6c6510e3f918f28.55b8cdac8353563ac0ba4bf6d1405f16 2024-12-02T15:23:50,173 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/046bda82651220493d064993627d2cdf/recovered.edits/8.seqid to hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/046bda82651220493d064993627d2cdf/recovered.edits/8.seqid 2024-12-02T15:23:50,173 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/55b8cdac8353563ac0ba4bf6d1405f16/recovered.edits/8.seqid to hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/55b8cdac8353563ac0ba4bf6d1405f16/recovered.edits/8.seqid 2024-12-02T15:23:50,174 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/87c318bcdd942ee5ef8fe681967ac54b/recovered.edits/12.seqid to hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/87c318bcdd942ee5ef8fe681967ac54b/recovered.edits/12.seqid 2024-12-02T15:23:50,174 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/55b8cdac8353563ac0ba4bf6d1405f16 2024-12-02T15:23:50,174 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/046bda82651220493d064993627d2cdf 2024-12-02T15:23:50,174 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/87c318bcdd942ee5ef8fe681967ac54b 2024-12-02T15:23:50,174 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportFileSystemStateWithMergeRegion-1 regions 2024-12-02T15:23:50,176 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=161, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-02T15:23:50,178 WARN [PEWorker-4 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of testtb-testExportFileSystemStateWithMergeRegion-1 from hbase:meta 2024-12-02T15:23:50,180 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(407): 
Removing 'testtb-testExportFileSystemStateWithMergeRegion-1' descriptor. 2024-12-02T15:23:50,181 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=161, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-02T15:23:50,181 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testExportFileSystemStateWithMergeRegion-1' from region states. 2024-12-02T15:23:50,181 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1,,1733153013788.87c318bcdd942ee5ef8fe681967ac54b.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733153030181"}]},"ts":"9223372036854775807"} 2024-12-02T15:23:50,183 INFO [PEWorker-4 {}] assignment.RegionStateStore(562): Deleted 1 regions from META 2024-12-02T15:23:50,183 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => 87c318bcdd942ee5ef8fe681967ac54b, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,,1733153013788.87c318bcdd942ee5ef8fe681967ac54b.', STARTKEY => '', ENDKEY => ''}] 2024-12-02T15:23:50,183 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testExportFileSystemStateWithMergeRegion-1' as deleted. 2024-12-02T15:23:50,183 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733153030183"}]},"ts":"9223372036854775807"} 2024-12-02T15:23:50,184 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportFileSystemStateWithMergeRegion-1 state from META 2024-12-02T15:23:50,185 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(133): Finished pid=161, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-02T15:23:50,186 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=161, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 in 39 msec 2024-12-02T15:23:50,198 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43025-0x10197ce54a90001, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-02T15:23:50,198 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34441-0x10197ce54a90000, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-02T15:23:50,198 DEBUG [pool-69-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45405-0x10197ce54a90002, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-02T15:23:50,198 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35155-0x10197ce54a90003, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-02T15:23:50,199 DEBUG [zk-permission-watcher-pool-0 {}] 
access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF 2024-12-02T15:23:50,199 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF 2024-12-02T15:23:50,199 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF 2024-12-02T15:23:50,199 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF 2024-12-02T15:23:50,206 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43025-0x10197ce54a90001, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-02T15:23:50,206 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35155-0x10197ce54a90003, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-02T15:23:50,206 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35155-0x10197ce54a90003, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T15:23:50,206 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43025-0x10197ce54a90001, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T15:23:50,206 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34441-0x10197ce54a90000, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-02T15:23:50,206 DEBUG [pool-69-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45405-0x10197ce54a90002, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-02T15:23:50,206 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34441-0x10197ce54a90000, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T15:23:50,206 DEBUG [pool-69-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45405-0x10197ce54a90002, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T15:23:50,207 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-02T15:23:50,207 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data 
PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-02T15:23:50,207 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-02T15:23:50,207 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-02T15:23:50,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=161 2024-12-02T15:23:50,208 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-02T15:23:50,208 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion-1 completed 2024-12-02T15:23:50,209 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testExportFileSystemStateWithMergeRegion 2024-12-02T15:23:50,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] procedure2.ProcedureExecutor(1139): Stored pid=162, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-12-02T15:23:50,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=162 2024-12-02T15:23:50,212 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733153030211"}]},"ts":"1733153030211"} 2024-12-02T15:23:50,213 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion, state=DISABLING in hbase:meta 2024-12-02T15:23:50,214 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(284): Set testtb-testExportFileSystemStateWithMergeRegion to state=DISABLING 2024-12-02T15:23:50,214 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=163, ppid=162, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion}] 2024-12-02T15:23:50,216 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=164, ppid=163, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=1829701d319630807ac0982962b00b1b, UNASSIGN}, {pid=165, ppid=163, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=ebd17761b06365d94a6efc9193e92c1d, UNASSIGN}] 2024-12-02T15:23:50,217 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=165, ppid=163, 
state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=ebd17761b06365d94a6efc9193e92c1d, UNASSIGN 2024-12-02T15:23:50,217 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=164, ppid=163, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=1829701d319630807ac0982962b00b1b, UNASSIGN 2024-12-02T15:23:50,218 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=165 updating hbase:meta row=ebd17761b06365d94a6efc9193e92c1d, regionState=CLOSING, regionLocation=be0458f36726,45405,1733152773787 2024-12-02T15:23:50,218 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=164 updating hbase:meta row=1829701d319630807ac0982962b00b1b, regionState=CLOSING, regionLocation=be0458f36726,35155,1733152773848 2024-12-02T15:23:50,220 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=165, ppid=163, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=ebd17761b06365d94a6efc9193e92c1d, UNASSIGN because future has completed 2024-12-02T15:23:50,220 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-02T15:23:50,220 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=166, ppid=165, state=RUNNABLE, hasLock=false; CloseRegionProcedure ebd17761b06365d94a6efc9193e92c1d, server=be0458f36726,45405,1733152773787}] 2024-12-02T15:23:50,221 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=164, ppid=163, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=1829701d319630807ac0982962b00b1b, UNASSIGN because future has completed 2024-12-02T15:23:50,221 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-02T15:23:50,221 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=167, ppid=164, state=RUNNABLE, hasLock=false; CloseRegionProcedure 1829701d319630807ac0982962b00b1b, server=be0458f36726,35155,1733152773848}] 2024-12-02T15:23:50,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=162 2024-12-02T15:23:50,374 INFO [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] handler.UnassignRegionHandler(122): Close ebd17761b06365d94a6efc9193e92c1d 2024-12-02T15:23:50,374 INFO [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=167}] handler.UnassignRegionHandler(122): Close 1829701d319630807ac0982962b00b1b 2024-12-02T15:23:50,374 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-02T15:23:50,374 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=167}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-02T15:23:50,375 DEBUG 
[RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] regionserver.HRegion(1722): Closing ebd17761b06365d94a6efc9193e92c1d, disabling compactions & flushes 2024-12-02T15:23:50,375 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=167}] regionserver.HRegion(1722): Closing 1829701d319630807ac0982962b00b1b, disabling compactions & flushes 2024-12-02T15:23:50,375 INFO [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion,1,1733153012127.ebd17761b06365d94a6efc9193e92c1d. 2024-12-02T15:23:50,375 INFO [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=167}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion,,1733153012127.1829701d319630807ac0982962b00b1b. 2024-12-02T15:23:50,375 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=167}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion,,1733153012127.1829701d319630807ac0982962b00b1b. 2024-12-02T15:23:50,375 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion,1,1733153012127.ebd17761b06365d94a6efc9193e92c1d. 2024-12-02T15:23:50,375 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=167}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion,,1733153012127.1829701d319630807ac0982962b00b1b. after waiting 0 ms 2024-12-02T15:23:50,375 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion,1,1733153012127.ebd17761b06365d94a6efc9193e92c1d. after waiting 0 ms 2024-12-02T15:23:50,375 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion,1,1733153012127.ebd17761b06365d94a6efc9193e92c1d. 2024-12-02T15:23:50,375 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=167}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion,,1733153012127.1829701d319630807ac0982962b00b1b. 2024-12-02T15:23:50,383 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=167}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportFileSystemStateWithMergeRegion/1829701d319630807ac0982962b00b1b/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-02T15:23:50,383 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=167}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-02T15:23:50,384 INFO [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=167}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion,,1733153012127.1829701d319630807ac0982962b00b1b. 
2024-12-02T15:23:50,384 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=167}] regionserver.HRegion(1676): Region close journal for 1829701d319630807ac0982962b00b1b: Waiting for close lock at 1733153030374Running coprocessor pre-close hooks at 1733153030374Disabling compacts and flushes for region at 1733153030375 (+1 ms)Disabling writes for close at 1733153030375Writing region close event to WAL at 1733153030377 (+2 ms)Running coprocessor post-close hooks at 1733153030383 (+6 ms)Closed at 1733153030383 2024-12-02T15:23:50,384 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportFileSystemStateWithMergeRegion/ebd17761b06365d94a6efc9193e92c1d/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-02T15:23:50,385 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-02T15:23:50,385 INFO [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion,1,1733153012127.ebd17761b06365d94a6efc9193e92c1d. 2024-12-02T15:23:50,385 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] regionserver.HRegion(1676): Region close journal for ebd17761b06365d94a6efc9193e92c1d: Waiting for close lock at 1733153030374Running coprocessor pre-close hooks at 1733153030374Disabling compacts and flushes for region at 1733153030374Disabling writes for close at 1733153030375 (+1 ms)Writing region close event to WAL at 1733153030377 (+2 ms)Running coprocessor post-close hooks at 1733153030385 (+8 ms)Closed at 1733153030385 2024-12-02T15:23:50,385 INFO [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=167}] handler.UnassignRegionHandler(157): Closed 1829701d319630807ac0982962b00b1b 2024-12-02T15:23:50,386 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=164 updating hbase:meta row=1829701d319630807ac0982962b00b1b, regionState=CLOSED 2024-12-02T15:23:50,386 INFO [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=166}] handler.UnassignRegionHandler(157): Closed ebd17761b06365d94a6efc9193e92c1d 2024-12-02T15:23:50,387 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=165 updating hbase:meta row=ebd17761b06365d94a6efc9193e92c1d, regionState=CLOSED 2024-12-02T15:23:50,388 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=167, ppid=164, state=RUNNABLE, hasLock=false; CloseRegionProcedure 1829701d319630807ac0982962b00b1b, server=be0458f36726,35155,1733152773848 because future has completed 2024-12-02T15:23:50,388 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=166, ppid=165, state=RUNNABLE, hasLock=false; CloseRegionProcedure ebd17761b06365d94a6efc9193e92c1d, server=be0458f36726,45405,1733152773787 because future has completed 2024-12-02T15:23:50,390 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=167, resume processing ppid=164 2024-12-02T15:23:50,390 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=167, ppid=164, 
state=SUCCESS, hasLock=false; CloseRegionProcedure 1829701d319630807ac0982962b00b1b, server=be0458f36726,35155,1733152773848 in 167 msec 2024-12-02T15:23:50,391 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=166, resume processing ppid=165 2024-12-02T15:23:50,391 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=166, ppid=165, state=SUCCESS, hasLock=false; CloseRegionProcedure ebd17761b06365d94a6efc9193e92c1d, server=be0458f36726,45405,1733152773787 in 169 msec 2024-12-02T15:23:50,391 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=164, ppid=163, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=1829701d319630807ac0982962b00b1b, UNASSIGN in 174 msec 2024-12-02T15:23:50,392 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=165, resume processing ppid=163 2024-12-02T15:23:50,392 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=165, ppid=163, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=ebd17761b06365d94a6efc9193e92c1d, UNASSIGN in 175 msec 2024-12-02T15:23:50,394 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=163, resume processing ppid=162 2024-12-02T15:23:50,394 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=163, ppid=162, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion in 179 msec 2024-12-02T15:23:50,396 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733153030395"}]},"ts":"1733153030395"} 2024-12-02T15:23:50,397 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion, state=DISABLED in hbase:meta 2024-12-02T15:23:50,397 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(296): Set testtb-testExportFileSystemStateWithMergeRegion to state=DISABLED 2024-12-02T15:23:50,399 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=162, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion in 189 msec 2024-12-02T15:23:50,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=162 2024-12-02T15:23:50,523 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion completed 2024-12-02T15:23:50,523 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testExportFileSystemStateWithMergeRegion 2024-12-02T15:23:50,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] procedure2.ProcedureExecutor(1139): Stored pid=168, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-12-02T15:23:50,525 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=168, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-12-02T15:23:50,525 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportFileSystemStateWithMergeRegion 2024-12-02T15:23:50,526 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=168, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-12-02T15:23:50,528 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43025 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportFileSystemStateWithMergeRegion 2024-12-02T15:23:50,530 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportFileSystemStateWithMergeRegion/1829701d319630807ac0982962b00b1b 2024-12-02T15:23:50,531 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportFileSystemStateWithMergeRegion/ebd17761b06365d94a6efc9193e92c1d 2024-12-02T15:23:50,534 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportFileSystemStateWithMergeRegion/1829701d319630807ac0982962b00b1b/cf, FileablePath, hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportFileSystemStateWithMergeRegion/1829701d319630807ac0982962b00b1b/recovered.edits] 2024-12-02T15:23:50,534 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportFileSystemStateWithMergeRegion/ebd17761b06365d94a6efc9193e92c1d/cf, FileablePath, hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportFileSystemStateWithMergeRegion/ebd17761b06365d94a6efc9193e92c1d/recovered.edits] 2024-12-02T15:23:50,560 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportFileSystemStateWithMergeRegion/ebd17761b06365d94a6efc9193e92c1d/cf/85246feab429427393af50359f012eaa to hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion/ebd17761b06365d94a6efc9193e92c1d/cf/85246feab429427393af50359f012eaa 2024-12-02T15:23:50,560 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportFileSystemStateWithMergeRegion/1829701d319630807ac0982962b00b1b/cf/b92af34f3223495396fc1d9e92e4b89a to hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion/1829701d319630807ac0982962b00b1b/cf/b92af34f3223495396fc1d9e92e4b89a 2024-12-02T15:23:50,562 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportFileSystemStateWithMergeRegion/1829701d319630807ac0982962b00b1b/recovered.edits/9.seqid to hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion/1829701d319630807ac0982962b00b1b/recovered.edits/9.seqid 2024-12-02T15:23:50,562 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportFileSystemStateWithMergeRegion/ebd17761b06365d94a6efc9193e92c1d/recovered.edits/9.seqid to hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion/ebd17761b06365d94a6efc9193e92c1d/recovered.edits/9.seqid 2024-12-02T15:23:50,563 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportFileSystemStateWithMergeRegion/1829701d319630807ac0982962b00b1b 2024-12-02T15:23:50,563 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportFileSystemStateWithMergeRegion/ebd17761b06365d94a6efc9193e92c1d 2024-12-02T15:23:50,563 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportFileSystemStateWithMergeRegion regions 2024-12-02T15:23:50,600 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=168, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-12-02T15:23:50,602 WARN [PEWorker-3 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testExportFileSystemStateWithMergeRegion from hbase:meta 2024-12-02T15:23:50,604 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testExportFileSystemStateWithMergeRegion' descriptor. 2024-12-02T15:23:50,605 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=168, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-12-02T15:23:50,606 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testExportFileSystemStateWithMergeRegion' from region states. 
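[Editor's sketch] The disable/delete sequence traced above (DisableTableProcedure closing both regions, HFileArchiver moving the store files under archive/data/default/..., then DeleteTableProcedure removing the META rows and the table descriptor) is what the master does in response to ordinary Admin calls from the test client. A minimal, hypothetical client-side sketch of that flow, assuming only a reachable cluster configuration and the table name taken from this log (not the test's actual code):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class DropTableSketch {
      public static void main(String[] args) throws Exception {
        // Reads hbase-site.xml from the classpath; the test harness wires this up internally.
        Configuration conf = HBaseConfiguration.create();
        TableName table = TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion");
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          if (admin.tableExists(table)) {
            if (admin.isTableEnabled(table)) {
              admin.disableTable(table);   // drives DisableTableProcedure: regions move to CLOSED
            }
            admin.deleteTable(table);      // drives DeleteTableProcedure: HFiles archived, META rows removed
          }
        }
      }
    }

The "delete name: ... type: DISABLED" / "Deleting snapshot" lines a little further down correspond to Admin#deleteSnapshot(String) calls made during test cleanup.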
2024-12-02T15:23:50,606 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion,,1733153012127.1829701d319630807ac0982962b00b1b.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733153030606"}]},"ts":"9223372036854775807"} 2024-12-02T15:23:50,606 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion,1,1733153012127.ebd17761b06365d94a6efc9193e92c1d.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733153030606"}]},"ts":"9223372036854775807"} 2024-12-02T15:23:50,608 INFO [PEWorker-3 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-12-02T15:23:50,608 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => 1829701d319630807ac0982962b00b1b, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,,1733153012127.1829701d319630807ac0982962b00b1b.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => ebd17761b06365d94a6efc9193e92c1d, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,1,1733153012127.ebd17761b06365d94a6efc9193e92c1d.', STARTKEY => '1', ENDKEY => ''}] 2024-12-02T15:23:50,608 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testExportFileSystemStateWithMergeRegion' as deleted. 2024-12-02T15:23:50,608 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733153030608"}]},"ts":"9223372036854775807"} 2024-12-02T15:23:50,610 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportFileSystemStateWithMergeRegion state from META 2024-12-02T15:23:50,610 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(133): Finished pid=168, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-12-02T15:23:50,612 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=168, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion in 86 msec 2024-12-02T15:23:50,639 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43025-0x10197ce54a90001, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-12-02T15:23:50,639 DEBUG [pool-69-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45405-0x10197ce54a90002, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-12-02T15:23:50,639 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34441-0x10197ce54a90000, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-12-02T15:23:50,639 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35155-0x10197ce54a90003, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-12-02T15:23:50,640 DEBUG [zk-permission-watcher-pool-0 {}] 
access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF 2024-12-02T15:23:50,640 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF 2024-12-02T15:23:50,640 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF 2024-12-02T15:23:50,640 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF 2024-12-02T15:23:50,647 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35155-0x10197ce54a90003, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-12-02T15:23:50,647 DEBUG [pool-69-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45405-0x10197ce54a90002, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-12-02T15:23:50,647 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43025-0x10197ce54a90001, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-12-02T15:23:50,647 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34441-0x10197ce54a90000, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-12-02T15:23:50,647 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35155-0x10197ce54a90003, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T15:23:50,647 DEBUG [pool-69-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45405-0x10197ce54a90002, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T15:23:50,647 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43025-0x10197ce54a90001, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T15:23:50,647 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34441-0x10197ce54a90000, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T15:23:50,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=168 2024-12-02T15:23:50,649 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportFileSystemStateWithMergeRegion 2024-12-02T15:23:50,649 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion completed 2024-12-02T15:23:50,659 INFO 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportFileSystemStateWithMergeRegion" type: DISABLED 2024-12-02T15:23:50,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testExportFileSystemStateWithMergeRegion 2024-12-02T15:23:50,663 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportFileSystemStateWithMergeRegion" type: DISABLED 2024-12-02T15:23:50,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportFileSystemStateWithMergeRegion 2024-12-02T15:23:50,667 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportFileSystemStateWithMergeRegion-1" type: DISABLED 2024-12-02T15:23:50,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-02T15:23:50,690 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestSecureExportSnapshot#testExportFileSystemStateWithMergeRegion Thread=818 (was 804) Potentially hanging thread: process reaper (pid 131986) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1147827230_1 at /127.0.0.1:58218 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-205930424_22 at /127.0.0.1:58246 [Waiting for operation #5] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Thread-5761 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: ApplicationMasterLauncher #12 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-17 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (834363022) connection to localhost/127.0.0.1:46423 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:42837 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-16 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-20 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (834363022) connection to localhost/127.0.0.1:42837 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: ApplicationMasterLauncher #11 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-18 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-205930424_22 at /127.0.0.1:36880 [Waiting for operation #4] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-19 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: 
DataXceiver for client DFSClient_NONMAPREDUCE_-205930424_22 at /127.0.0.1:53038 [Waiting for operation #5] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=826 (was 802) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=732 (was 770), ProcessCount=19 (was 18) - ProcessCount LEAK? -, AvailableMemoryMB=1271 (was 1467) 2024-12-02T15:23:50,690 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=818 is superior to 500 2024-12-02T15:23:50,709 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestSecureExportSnapshot#testExportExpiredSnapshot Thread=818, OpenFileDescriptor=826, MaxFileDescriptor=1048576, SystemLoadAverage=732, ProcessCount=19, AvailableMemoryMB=1271 2024-12-02T15:23:50,709 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=818 is superior to 500 2024-12-02T15:23:50,710 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testExportExpiredSnapshot', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-02T15:23:50,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] procedure2.ProcedureExecutor(1139): Stored pid=169, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportExpiredSnapshot 2024-12-02T15:23:50,712 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=169, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportExpiredSnapshot execute state=CREATE_TABLE_PRE_OPERATION 2024-12-02T15:23:50,713 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T15:23:50,713 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportExpiredSnapshot" procId is: 169 
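[Editor's sketch] The create request logged above carries the full table descriptor (a single 'cf' family, VERSIONS => '1', default block size) and ends up producing two regions split at row key '1'. A hypothetical client-side equivalent using the descriptor builder API, with the table name, family, and split key taken from this log rather than from the test source:

    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    class CreateTableSketch {
      static void createExpiredSnapshotTable(Admin admin) throws IOException {
        // Descriptor mirroring the one printed in the create log line: one 'cf' family, one version kept.
        TableDescriptor desc = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("testtb-testExportExpiredSnapshot"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
                .setMaxVersions(1)   // VERSIONS => '1'
                .build())
            .build();
        // A single split point at '1' yields the two regions seen below: [ ,1) and [1, ).
        byte[][] splits = { Bytes.toBytes("1") };
        admin.createTable(desc, splits);   // stored as pid=169 CreateTableProcedure in this log
      }
    }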
2024-12-02T15:23:50,713 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=169, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportExpiredSnapshot execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-02T15:23:50,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=169 2024-12-02T15:23:50,730 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742209_1385 (size=407) 2024-12-02T15:23:50,731 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742209_1385 (size=407) 2024-12-02T15:23:50,731 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742209_1385 (size=407) 2024-12-02T15:23:50,733 INFO [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => c5999f0e0ea17043a3e07c6712f1e8f0, NAME => 'testtb-testExportExpiredSnapshot,,1733153030710.c5999f0e0ea17043a3e07c6712f1e8f0.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportExpiredSnapshot', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657 2024-12-02T15:23:50,733 INFO [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => 87e205a09dc0f6af36a7f0bac5bd0ba3, NAME => 'testtb-testExportExpiredSnapshot,1,1733153030710.87e205a09dc0f6af36a7f0bac5bd0ba3.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportExpiredSnapshot', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657 2024-12-02T15:23:50,767 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742210_1386 (size=68) 2024-12-02T15:23:50,767 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742210_1386 (size=68) 2024-12-02T15:23:50,767 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742210_1386 (size=68) 2024-12-02T15:23:50,768 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportExpiredSnapshot,1,1733153030710.87e205a09dc0f6af36a7f0bac5bd0ba3.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 
2024-12-02T15:23:50,768 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1722): Closing 87e205a09dc0f6af36a7f0bac5bd0ba3, disabling compactions & flushes 2024-12-02T15:23:50,768 INFO [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportExpiredSnapshot,1,1733153030710.87e205a09dc0f6af36a7f0bac5bd0ba3. 2024-12-02T15:23:50,768 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportExpiredSnapshot,1,1733153030710.87e205a09dc0f6af36a7f0bac5bd0ba3. 2024-12-02T15:23:50,768 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportExpiredSnapshot,1,1733153030710.87e205a09dc0f6af36a7f0bac5bd0ba3. after waiting 0 ms 2024-12-02T15:23:50,768 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportExpiredSnapshot,1,1733153030710.87e205a09dc0f6af36a7f0bac5bd0ba3. 2024-12-02T15:23:50,768 INFO [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportExpiredSnapshot,1,1733153030710.87e205a09dc0f6af36a7f0bac5bd0ba3. 2024-12-02T15:23:50,768 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1676): Region close journal for 87e205a09dc0f6af36a7f0bac5bd0ba3: Waiting for close lock at 1733153030768Disabling compacts and flushes for region at 1733153030768Disabling writes for close at 1733153030768Writing region close event to WAL at 1733153030768Closed at 1733153030768 2024-12-02T15:23:50,775 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742211_1387 (size=68) 2024-12-02T15:23:50,775 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742211_1387 (size=68) 2024-12-02T15:23:50,776 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742211_1387 (size=68) 2024-12-02T15:23:50,776 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportExpiredSnapshot,,1733153030710.c5999f0e0ea17043a3e07c6712f1e8f0.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T15:23:50,776 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1722): Closing c5999f0e0ea17043a3e07c6712f1e8f0, disabling compactions & flushes 2024-12-02T15:23:50,776 INFO [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportExpiredSnapshot,,1733153030710.c5999f0e0ea17043a3e07c6712f1e8f0. 2024-12-02T15:23:50,776 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportExpiredSnapshot,,1733153030710.c5999f0e0ea17043a3e07c6712f1e8f0. 2024-12-02T15:23:50,776 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportExpiredSnapshot,,1733153030710.c5999f0e0ea17043a3e07c6712f1e8f0. 
after waiting 0 ms 2024-12-02T15:23:50,776 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportExpiredSnapshot,,1733153030710.c5999f0e0ea17043a3e07c6712f1e8f0. 2024-12-02T15:23:50,776 INFO [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportExpiredSnapshot,,1733153030710.c5999f0e0ea17043a3e07c6712f1e8f0. 2024-12-02T15:23:50,776 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1676): Region close journal for c5999f0e0ea17043a3e07c6712f1e8f0: Waiting for close lock at 1733153030776Disabling compacts and flushes for region at 1733153030776Disabling writes for close at 1733153030776Writing region close event to WAL at 1733153030776Closed at 1733153030776 2024-12-02T15:23:50,777 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=169, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportExpiredSnapshot execute state=CREATE_TABLE_ADD_TO_META 2024-12-02T15:23:50,778 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportExpiredSnapshot,1,1733153030710.87e205a09dc0f6af36a7f0bac5bd0ba3.","families":{"info":[{"qualifier":"regioninfo","vlen":67,"tag":[],"timestamp":"1733153030778"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733153030778"}]},"ts":"1733153030778"} 2024-12-02T15:23:50,778 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportExpiredSnapshot,,1733153030710.c5999f0e0ea17043a3e07c6712f1e8f0.","families":{"info":[{"qualifier":"regioninfo","vlen":67,"tag":[],"timestamp":"1733153030778"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733153030778"}]},"ts":"1733153030778"} 2024-12-02T15:23:50,781 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 
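[Editor's sketch] Once the two META rows are written, the balancer picks target servers and the TransitRegionStateProcedures below move each region through OPENING to OPEN. A small hedged sketch (a hypothetical helper, not part of the test) of how a client can wait for that assignment to finish and see where the regions landed:

    import java.util.List;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.RegionLocator;

    class AssignmentCheckSketch {
      static void waitAndPrintLocations(Connection conn, Admin admin, TableName table) throws Exception {
        // Block until every region of the table has been opened on some region server.
        while (!admin.isTableAvailable(table)) {
          Thread.sleep(100);
        }
        try (RegionLocator locator = conn.getRegionLocator(table)) {
          List<HRegionLocation> locations = locator.getAllRegionLocations();
          for (HRegionLocation loc : locations) {
            // e.g. c5999f0e0ea17043a3e07c6712f1e8f0 -> be0458f36726,35155,1733152773848
            System.out.println(loc.getRegion().getEncodedName() + " -> " + loc.getServerName());
          }
        }
      }
    }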
2024-12-02T15:23:50,782 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=169, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportExpiredSnapshot execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-02T15:23:50,782 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733153030782"}]},"ts":"1733153030782"} 2024-12-02T15:23:50,784 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportExpiredSnapshot, state=ENABLING in hbase:meta 2024-12-02T15:23:50,784 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(204): Hosts are {be0458f36726=0} racks are {/default-rack=0} 2024-12-02T15:23:50,785 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-02T15:23:50,785 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-02T15:23:50,785 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-02T15:23:50,785 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-02T15:23:50,786 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-02T15:23:50,786 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-02T15:23:50,786 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-02T15:23:50,786 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-02T15:23:50,786 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-02T15:23:50,786 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-02T15:23:50,786 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=170, ppid=169, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=c5999f0e0ea17043a3e07c6712f1e8f0, ASSIGN}, {pid=171, ppid=169, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=87e205a09dc0f6af36a7f0bac5bd0ba3, ASSIGN}] 2024-12-02T15:23:50,787 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=170, ppid=169, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=c5999f0e0ea17043a3e07c6712f1e8f0, ASSIGN 2024-12-02T15:23:50,787 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=171, ppid=169, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=87e205a09dc0f6af36a7f0bac5bd0ba3, ASSIGN 2024-12-02T15:23:50,788 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=171, ppid=169, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=87e205a09dc0f6af36a7f0bac5bd0ba3, ASSIGN; state=OFFLINE, location=be0458f36726,43025,1733152773677; forceNewPlan=false, retain=false 2024-12-02T15:23:50,788 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=170, ppid=169, 
state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=c5999f0e0ea17043a3e07c6712f1e8f0, ASSIGN; state=OFFLINE, location=be0458f36726,35155,1733152773848; forceNewPlan=false, retain=false 2024-12-02T15:23:50,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=169 2024-12-02T15:23:50,939 INFO [be0458f36726:34441 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 2024-12-02T15:23:50,939 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=170 updating hbase:meta row=c5999f0e0ea17043a3e07c6712f1e8f0, regionState=OPENING, regionLocation=be0458f36726,35155,1733152773848 2024-12-02T15:23:50,939 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=171 updating hbase:meta row=87e205a09dc0f6af36a7f0bac5bd0ba3, regionState=OPENING, regionLocation=be0458f36726,43025,1733152773677 2024-12-02T15:23:50,943 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=171, ppid=169, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=87e205a09dc0f6af36a7f0bac5bd0ba3, ASSIGN because future has completed 2024-12-02T15:23:50,944 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=172, ppid=171, state=RUNNABLE, hasLock=false; OpenRegionProcedure 87e205a09dc0f6af36a7f0bac5bd0ba3, server=be0458f36726,43025,1733152773677}] 2024-12-02T15:23:50,945 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=170, ppid=169, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=c5999f0e0ea17043a3e07c6712f1e8f0, ASSIGN because future has completed 2024-12-02T15:23:50,946 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=173, ppid=170, state=RUNNABLE, hasLock=false; OpenRegionProcedure c5999f0e0ea17043a3e07c6712f1e8f0, server=be0458f36726,35155,1733152773848}] 2024-12-02T15:23:51,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=169 2024-12-02T15:23:51,106 INFO [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] handler.AssignRegionHandler(132): Open testtb-testExportExpiredSnapshot,1,1733153030710.87e205a09dc0f6af36a7f0bac5bd0ba3. 2024-12-02T15:23:51,106 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(7752): Opening region: {ENCODED => 87e205a09dc0f6af36a7f0bac5bd0ba3, NAME => 'testtb-testExportExpiredSnapshot,1,1733153030710.87e205a09dc0f6af36a7f0bac5bd0ba3.', STARTKEY => '1', ENDKEY => ''} 2024-12-02T15:23:51,107 INFO [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] handler.AssignRegionHandler(132): Open testtb-testExportExpiredSnapshot,,1733153030710.c5999f0e0ea17043a3e07c6712f1e8f0. 
2024-12-02T15:23:51,107 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(7752): Opening region: {ENCODED => c5999f0e0ea17043a3e07c6712f1e8f0, NAME => 'testtb-testExportExpiredSnapshot,,1733153030710.c5999f0e0ea17043a3e07c6712f1e8f0.', STARTKEY => '', ENDKEY => '1'} 2024-12-02T15:23:51,107 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportExpiredSnapshot,1,1733153030710.87e205a09dc0f6af36a7f0bac5bd0ba3. service=AccessControlService 2024-12-02T15:23:51,107 INFO [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-02T15:23:51,107 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportExpiredSnapshot,,1733153030710.c5999f0e0ea17043a3e07c6712f1e8f0. service=AccessControlService 2024-12-02T15:23:51,108 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportExpiredSnapshot 87e205a09dc0f6af36a7f0bac5bd0ba3 2024-12-02T15:23:51,108 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(898): Instantiated testtb-testExportExpiredSnapshot,1,1733153030710.87e205a09dc0f6af36a7f0bac5bd0ba3.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T15:23:51,108 INFO [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-02T15:23:51,108 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(7794): checking encryption for 87e205a09dc0f6af36a7f0bac5bd0ba3 2024-12-02T15:23:51,108 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportExpiredSnapshot c5999f0e0ea17043a3e07c6712f1e8f0 2024-12-02T15:23:51,108 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(7797): checking classloading for 87e205a09dc0f6af36a7f0bac5bd0ba3 2024-12-02T15:23:51,108 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(898): Instantiated testtb-testExportExpiredSnapshot,,1733153030710.c5999f0e0ea17043a3e07c6712f1e8f0.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T15:23:51,109 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(7794): checking encryption for c5999f0e0ea17043a3e07c6712f1e8f0 2024-12-02T15:23:51,109 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(7797): checking classloading for c5999f0e0ea17043a3e07c6712f1e8f0 2024-12-02T15:23:51,111 INFO [StoreOpener-87e205a09dc0f6af36a7f0bac5bd0ba3-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 87e205a09dc0f6af36a7f0bac5bd0ba3 2024-12-02T15:23:51,111 INFO [StoreOpener-c5999f0e0ea17043a3e07c6712f1e8f0-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region c5999f0e0ea17043a3e07c6712f1e8f0 2024-12-02T15:23:51,113 INFO [StoreOpener-87e205a09dc0f6af36a7f0bac5bd0ba3-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 87e205a09dc0f6af36a7f0bac5bd0ba3 columnFamilyName cf 2024-12-02T15:23:51,113 INFO [StoreOpener-c5999f0e0ea17043a3e07c6712f1e8f0-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window 
org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region c5999f0e0ea17043a3e07c6712f1e8f0 columnFamilyName cf 2024-12-02T15:23:51,113 DEBUG [StoreOpener-c5999f0e0ea17043a3e07c6712f1e8f0-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T15:23:51,113 DEBUG [StoreOpener-87e205a09dc0f6af36a7f0bac5bd0ba3-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T15:23:51,113 INFO [StoreOpener-c5999f0e0ea17043a3e07c6712f1e8f0-1 {}] regionserver.HStore(327): Store=c5999f0e0ea17043a3e07c6712f1e8f0/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T15:23:51,113 INFO [StoreOpener-87e205a09dc0f6af36a7f0bac5bd0ba3-1 {}] regionserver.HStore(327): Store=87e205a09dc0f6af36a7f0bac5bd0ba3/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T15:23:51,113 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(1038): replaying wal for c5999f0e0ea17043a3e07c6712f1e8f0 2024-12-02T15:23:51,113 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(1038): replaying wal for 87e205a09dc0f6af36a7f0bac5bd0ba3 2024-12-02T15:23:51,114 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportExpiredSnapshot/c5999f0e0ea17043a3e07c6712f1e8f0 2024-12-02T15:23:51,114 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportExpiredSnapshot/87e205a09dc0f6af36a7f0bac5bd0ba3 2024-12-02T15:23:51,114 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportExpiredSnapshot/87e205a09dc0f6af36a7f0bac5bd0ba3 2024-12-02T15:23:51,114 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportExpiredSnapshot/c5999f0e0ea17043a3e07c6712f1e8f0 2024-12-02T15:23:51,115 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(1048): stopping wal replay for 87e205a09dc0f6af36a7f0bac5bd0ba3 2024-12-02T15:23:51,115 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(1048): stopping wal replay for c5999f0e0ea17043a3e07c6712f1e8f0 2024-12-02T15:23:51,115 DEBUG 
[RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(1060): Cleaning up temporary data for 87e205a09dc0f6af36a7f0bac5bd0ba3 2024-12-02T15:23:51,115 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(1060): Cleaning up temporary data for c5999f0e0ea17043a3e07c6712f1e8f0 2024-12-02T15:23:51,116 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(1093): writing seq id for 87e205a09dc0f6af36a7f0bac5bd0ba3 2024-12-02T15:23:51,116 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(1093): writing seq id for c5999f0e0ea17043a3e07c6712f1e8f0 2024-12-02T15:23:51,118 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportExpiredSnapshot/87e205a09dc0f6af36a7f0bac5bd0ba3/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-02T15:23:51,118 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportExpiredSnapshot/c5999f0e0ea17043a3e07c6712f1e8f0/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-02T15:23:51,118 INFO [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(1114): Opened c5999f0e0ea17043a3e07c6712f1e8f0; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=67682356, jitterRate=0.00854569673538208}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-02T15:23:51,118 INFO [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(1114): Opened 87e205a09dc0f6af36a7f0bac5bd0ba3; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=74998139, jitterRate=0.11755935847759247}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-02T15:23:51,118 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(1122): Running coprocessor post-open hooks for c5999f0e0ea17043a3e07c6712f1e8f0 2024-12-02T15:23:51,118 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 87e205a09dc0f6af36a7f0bac5bd0ba3 2024-12-02T15:23:51,119 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegion(1006): Region open journal for c5999f0e0ea17043a3e07c6712f1e8f0: Running coprocessor pre-open hook at 1733153031109Writing region info on filesystem at 1733153031109Initializing all the Stores at 1733153031110 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733153031110Cleaning up temporary data from 
old regions at 1733153031115 (+5 ms)Running coprocessor post-open hooks at 1733153031118 (+3 ms)Region opened successfully at 1733153031119 (+1 ms) 2024-12-02T15:23:51,119 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegion(1006): Region open journal for 87e205a09dc0f6af36a7f0bac5bd0ba3: Running coprocessor pre-open hook at 1733153031109Writing region info on filesystem at 1733153031109Initializing all the Stores at 1733153031110 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733153031110Cleaning up temporary data from old regions at 1733153031115 (+5 ms)Running coprocessor post-open hooks at 1733153031118 (+3 ms)Region opened successfully at 1733153031119 (+1 ms) 2024-12-02T15:23:51,120 INFO [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportExpiredSnapshot,,1733153030710.c5999f0e0ea17043a3e07c6712f1e8f0., pid=173, masterSystemTime=1733153031100 2024-12-02T15:23:51,120 INFO [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportExpiredSnapshot,1,1733153030710.87e205a09dc0f6af36a7f0bac5bd0ba3., pid=172, masterSystemTime=1733153031098 2024-12-02T15:23:51,122 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportExpiredSnapshot,,1733153030710.c5999f0e0ea17043a3e07c6712f1e8f0. 2024-12-02T15:23:51,122 INFO [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=173}] handler.AssignRegionHandler(153): Opened testtb-testExportExpiredSnapshot,,1733153030710.c5999f0e0ea17043a3e07c6712f1e8f0. 2024-12-02T15:23:51,123 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=170 updating hbase:meta row=c5999f0e0ea17043a3e07c6712f1e8f0, regionState=OPEN, openSeqNum=2, regionLocation=be0458f36726,35155,1733152773848 2024-12-02T15:23:51,123 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportExpiredSnapshot,1,1733153030710.87e205a09dc0f6af36a7f0bac5bd0ba3. 2024-12-02T15:23:51,123 INFO [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=172}] handler.AssignRegionHandler(153): Opened testtb-testExportExpiredSnapshot,1,1733153030710.87e205a09dc0f6af36a7f0bac5bd0ba3. 
2024-12-02T15:23:51,124 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=171 updating hbase:meta row=87e205a09dc0f6af36a7f0bac5bd0ba3, regionState=OPEN, openSeqNum=2, regionLocation=be0458f36726,43025,1733152773677 2024-12-02T15:23:51,124 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=173, ppid=170, state=RUNNABLE, hasLock=false; OpenRegionProcedure c5999f0e0ea17043a3e07c6712f1e8f0, server=be0458f36726,35155,1733152773848 because future has completed 2024-12-02T15:23:51,125 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=172, ppid=171, state=RUNNABLE, hasLock=false; OpenRegionProcedure 87e205a09dc0f6af36a7f0bac5bd0ba3, server=be0458f36726,43025,1733152773677 because future has completed 2024-12-02T15:23:51,127 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=173, resume processing ppid=170 2024-12-02T15:23:51,127 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=173, ppid=170, state=SUCCESS, hasLock=false; OpenRegionProcedure c5999f0e0ea17043a3e07c6712f1e8f0, server=be0458f36726,35155,1733152773848 in 180 msec 2024-12-02T15:23:51,128 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=172, resume processing ppid=171 2024-12-02T15:23:51,128 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=172, ppid=171, state=SUCCESS, hasLock=false; OpenRegionProcedure 87e205a09dc0f6af36a7f0bac5bd0ba3, server=be0458f36726,43025,1733152773677 in 182 msec 2024-12-02T15:23:51,128 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=170, ppid=169, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=c5999f0e0ea17043a3e07c6712f1e8f0, ASSIGN in 341 msec 2024-12-02T15:23:51,129 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=171, resume processing ppid=169 2024-12-02T15:23:51,129 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=171, ppid=169, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=87e205a09dc0f6af36a7f0bac5bd0ba3, ASSIGN in 342 msec 2024-12-02T15:23:51,130 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=169, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportExpiredSnapshot execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-02T15:23:51,130 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733153031130"}]},"ts":"1733153031130"} 2024-12-02T15:23:51,132 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportExpiredSnapshot, state=ENABLED in hbase:meta 2024-12-02T15:23:51,132 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=169, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportExpiredSnapshot execute state=CREATE_TABLE_POST_OPERATION 2024-12-02T15:23:51,132 DEBUG [PEWorker-1 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportExpiredSnapshot jenkins: RWXCA 2024-12-02T15:23:51,136 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43025 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportExpiredSnapshot], 
kv [jenkins: RWXCA] 2024-12-02T15:23:51,164 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34441-0x10197ce54a90000, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T15:23:51,164 DEBUG [pool-69-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45405-0x10197ce54a90002, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T15:23:51,164 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43025-0x10197ce54a90001, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T15:23:51,164 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35155-0x10197ce54a90003, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T15:23:51,173 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-02T15:23:51,173 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-02T15:23:51,173 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-02T15:23:51,174 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-02T15:23:51,176 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=169, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportExpiredSnapshot in 462 msec 2024-12-02T15:23:51,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=169 2024-12-02T15:23:51,345 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportExpiredSnapshot completed 2024-12-02T15:23:51,345 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table testtb-testExportExpiredSnapshot get assigned. Timeout = 60000ms 2024-12-02T15:23:51,346 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-02T15:23:51,355 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table testtb-testExportExpiredSnapshot assigned to meta. Checking AM states. 
2024-12-02T15:23:51,356 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-02T15:23:51,356 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table testtb-testExportExpiredSnapshot assigned. 2024-12-02T15:23:51,356 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportExpiredSnapshot,, stopping at row=testtb-testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-12-02T15:23:51,358 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } 2024-12-02T15:23:51,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733153031358 (current time:1733153031358). 2024-12-02T15:23:51,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-02T15:23:51,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testExportExpiredSnapshot VERSION not specified, setting to 2 2024-12-02T15:23:51,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-02T15:23:51,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5d3a83a7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T15:23:51,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] client.ClusterIdFetcher(90): Going to request be0458f36726,34441,-1 for getting cluster id 2024-12-02T15:23:51,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-02T15:23:51,360 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '8e50677f-f1c7-447a-8806-ffbc6b162076' 2024-12-02T15:23:51,360 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-02T15:23:51,360 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "8e50677f-f1c7-447a-8806-ffbc6b162076" 2024-12-02T15:23:51,361 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@36ed4c5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T15:23:51,361 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [be0458f36726,34441,-1] 2024-12-02T15:23:51,361 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE 
authentication for service=ClientMetaService, sasl=false 2024-12-02T15:23:51,361 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T15:23:51,362 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42678, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-02T15:23:51,362 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6e8a1dc5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T15:23:51,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-02T15:23:51,363 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=be0458f36726,45405,1733152773787, seqNum=-1] 2024-12-02T15:23:51,363 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T15:23:51,364 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51902, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T15:23:51,365 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441. 
2024-12-02T15:23:51,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-02T15:23:51,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T15:23:51,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T15:23:51,365 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-02T15:23:51,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1f46e672, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T15:23:51,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] client.ClusterIdFetcher(90): Going to request be0458f36726,34441,-1 for getting cluster id 2024-12-02T15:23:51,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-02T15:23:51,367 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '8e50677f-f1c7-447a-8806-ffbc6b162076' 2024-12-02T15:23:51,367 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-02T15:23:51,367 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "8e50677f-f1c7-447a-8806-ffbc6b162076" 2024-12-02T15:23:51,367 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@56496abe, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T15:23:51,367 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [be0458f36726,34441,-1] 2024-12-02T15:23:51,367 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-02T15:23:51,367 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T15:23:51,368 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42696, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-02T15:23:51,368 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@218a68f1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T15:23:51,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-02T15:23:51,370 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=be0458f36726,45405,1733152773787, seqNum=-1] 2024-12-02T15:23:51,370 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T15:23:51,371 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51904, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T15:23:51,372 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportExpiredSnapshot', locateType=CURRENT is [region=hbase:acl,,1733152776657.82c95700d9ff2efea2507bd84223c4b0., hostname=be0458f36726,43025,1733152773677, seqNum=2] 2024-12-02T15:23:51,373 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T15:23:51,373 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53332, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T15:23:51,374 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441. 
2024-12-02T15:23:51,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor255.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-02T15:23:51,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T15:23:51,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T15:23:51,374 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-02T15:23:51,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportExpiredSnapshot], kv [jenkins: RWXCA] 2024-12-02T15:23:51,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
2024-12-02T15:23:51,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] procedure2.ProcedureExecutor(1139): Stored pid=174, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=174, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } 2024-12-02T15:23:51,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 }, snapshot procedure id = 174 2024-12-02T15:23:51,377 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=174, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=174, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-02T15:23:51,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=174 2024-12-02T15:23:51,378 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=174, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=174, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-02T15:23:51,380 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=174, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=174, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-02T15:23:51,385 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742212_1388 (size=170) 2024-12-02T15:23:51,385 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742212_1388 (size=170) 2024-12-02T15:23:51,386 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742212_1388 (size=170) 2024-12-02T15:23:51,387 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=174, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=174, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-02T15:23:51,387 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=175, ppid=174, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure c5999f0e0ea17043a3e07c6712f1e8f0}, {pid=176, ppid=174, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 87e205a09dc0f6af36a7f0bac5bd0ba3}] 2024-12-02T15:23:51,387 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=175, ppid=174, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure c5999f0e0ea17043a3e07c6712f1e8f0 2024-12-02T15:23:51,387 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=176, ppid=174, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 87e205a09dc0f6af36a7f0bac5bd0ba3 2024-12-02T15:23:51,484 
DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=174 2024-12-02T15:23:51,540 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35155 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=175 2024-12-02T15:23:51,540 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43025 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=176 2024-12-02T15:23:51,541 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=175}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportExpiredSnapshot,,1733153030710.c5999f0e0ea17043a3e07c6712f1e8f0. 2024-12-02T15:23:51,541 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=176}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportExpiredSnapshot,1,1733153030710.87e205a09dc0f6af36a7f0bac5bd0ba3. 2024-12-02T15:23:51,542 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=175}] regionserver.HRegion(2603): Flush status journal for c5999f0e0ea17043a3e07c6712f1e8f0: 2024-12-02T15:23:51,542 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=176}] regionserver.HRegion(2603): Flush status journal for 87e205a09dc0f6af36a7f0bac5bd0ba3: 2024-12-02T15:23:51,542 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=175}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportExpiredSnapshot,,1733153030710.c5999f0e0ea17043a3e07c6712f1e8f0. for emptySnaptb0-testExportExpiredSnapshot completed. 2024-12-02T15:23:51,542 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=176}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportExpiredSnapshot,1,1733153030710.87e205a09dc0f6af36a7f0bac5bd0ba3. for emptySnaptb0-testExportExpiredSnapshot completed. 2024-12-02T15:23:51,542 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=175}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportExpiredSnapshot,,1733153030710.c5999f0e0ea17043a3e07c6712f1e8f0.' region-info for snapshot=emptySnaptb0-testExportExpiredSnapshot 2024-12-02T15:23:51,542 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=175}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-02T15:23:51,543 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=175}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-02T15:23:51,542 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=176}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportExpiredSnapshot,1,1733153030710.87e205a09dc0f6af36a7f0bac5bd0ba3.' 
region-info for snapshot=emptySnaptb0-testExportExpiredSnapshot 2024-12-02T15:23:51,543 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=176}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-02T15:23:51,543 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=176}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-02T15:23:51,554 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742213_1389 (size=71) 2024-12-02T15:23:51,554 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742213_1389 (size=71) 2024-12-02T15:23:51,554 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742214_1390 (size=71) 2024-12-02T15:23:51,555 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742214_1390 (size=71) 2024-12-02T15:23:51,555 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742213_1389 (size=71) 2024-12-02T15:23:51,555 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742214_1390 (size=71) 2024-12-02T15:23:51,569 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=176}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportExpiredSnapshot,1,1733153030710.87e205a09dc0f6af36a7f0bac5bd0ba3. 2024-12-02T15:23:51,569 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=175}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportExpiredSnapshot,,1733153030710.c5999f0e0ea17043a3e07c6712f1e8f0. 
2024-12-02T15:23:51,569 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=175}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=175 2024-12-02T15:23:51,569 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=176}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=176 2024-12-02T15:23:51,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.HMaster(4169): Remote procedure done, pid=176 2024-12-02T15:23:51,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] master.HMaster(4169): Remote procedure done, pid=175 2024-12-02T15:23:51,570 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportExpiredSnapshot on region 87e205a09dc0f6af36a7f0bac5bd0ba3 2024-12-02T15:23:51,570 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportExpiredSnapshot on region c5999f0e0ea17043a3e07c6712f1e8f0 2024-12-02T15:23:51,570 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=175, ppid=174, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure c5999f0e0ea17043a3e07c6712f1e8f0 2024-12-02T15:23:51,570 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=176, ppid=174, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 87e205a09dc0f6af36a7f0bac5bd0ba3 2024-12-02T15:23:51,573 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=175, ppid=174, state=SUCCESS, hasLock=false; SnapshotRegionProcedure c5999f0e0ea17043a3e07c6712f1e8f0 in 184 msec 2024-12-02T15:23:51,574 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=176, resume processing ppid=174 2024-12-02T15:23:51,574 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=176, ppid=174, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 87e205a09dc0f6af36a7f0bac5bd0ba3 in 184 msec 2024-12-02T15:23:51,574 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=174, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=174, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-02T15:23:51,575 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=174, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=174, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-02T15:23:51,576 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=174, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=174, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-02T15:23:51,576 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportExpiredSnapshot 2024-12-02T15:23:51,577 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifestV1(130): No regions under 
directory:hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/.hbase-snapshot/.tmp/emptySnaptb0-testExportExpiredSnapshot 2024-12-02T15:23:51,589 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742215_1391 (size=552) 2024-12-02T15:23:51,589 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742215_1391 (size=552) 2024-12-02T15:23:51,590 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742215_1391 (size=552) 2024-12-02T15:23:51,592 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=174, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=174, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-02T15:23:51,597 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=174, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=174, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-02T15:23:51,598 DEBUG [PEWorker-3 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/.hbase-snapshot/.tmp/emptySnaptb0-testExportExpiredSnapshot to hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/.hbase-snapshot/emptySnaptb0-testExportExpiredSnapshot 2024-12-02T15:23:51,599 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=174, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=174, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-02T15:23:51,599 DEBUG [PEWorker-3 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 }, snapshot procedure id = 174 2024-12-02T15:23:51,601 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=174, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=174, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } in 224 msec 2024-12-02T15:23:51,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=174 2024-12-02T15:23:51,694 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportExpiredSnapshot completed 2024-12-02T15:23:51,703 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportExpiredSnapshot', row='0d3876445fb60e214ec071a7472439323', locateType=CURRENT is [region=testtb-testExportExpiredSnapshot,,1733153030710.c5999f0e0ea17043a3e07c6712f1e8f0., hostname=be0458f36726,35155,1733152773848, seqNum=2] 2024-12-02T15:23:51,704 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] 
client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportExpiredSnapshot', row='137d06e4d2cfbefa6008cf3ec3c471b3e', locateType=CURRENT is [region=testtb-testExportExpiredSnapshot,1,1733153030710.87e205a09dc0f6af36a7f0bac5bd0ba3., hostname=be0458f36726,43025,1733152773677, seqNum=2] 2024-12-02T15:23:51,705 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportExpiredSnapshot', row='35263a9739927051239c427a2dc5e6a1d', locateType=CURRENT is [region=testtb-testExportExpiredSnapshot,1,1733153030710.87e205a09dc0f6af36a7f0bac5bd0ba3., hostname=be0458f36726,43025,1733152773677, seqNum=2] 2024-12-02T15:23:51,706 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportExpiredSnapshot', row='208ba8f4080f8591d77a68ae47be29b25', locateType=CURRENT is [region=testtb-testExportExpiredSnapshot,1,1733153030710.87e205a09dc0f6af36a7f0bac5bd0ba3., hostname=be0458f36726,43025,1733152773677, seqNum=2] 2024-12-02T15:23:51,707 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportExpiredSnapshot', row='4d369f6c62415318487f40dced02885ca', locateType=CURRENT is [region=testtb-testExportExpiredSnapshot,1,1733153030710.87e205a09dc0f6af36a7f0bac5bd0ba3., hostname=be0458f36726,43025,1733152773677, seqNum=2] 2024-12-02T15:23:51,708 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportExpiredSnapshot', row='5655f1ab9499cf5724695d54785e30ed2', locateType=CURRENT is [region=testtb-testExportExpiredSnapshot,1,1733153030710.87e205a09dc0f6af36a7f0bac5bd0ba3., hostname=be0458f36726,43025,1733152773677, seqNum=2] 2024-12-02T15:23:51,710 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35155 {}] regionserver.HRegion(8528): writing data to region testtb-testExportExpiredSnapshot,,1733153030710.c5999f0e0ea17043a3e07c6712f1e8f0. with WAL disabled. Data may be lost in the event of a crash. 2024-12-02T15:23:51,713 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43025 {}] regionserver.HRegion(8528): writing data to region testtb-testExportExpiredSnapshot,1,1733153030710.87e205a09dc0f6af36a7f0bac5bd0ba3. with WAL disabled. Data may be lost in the event of a crash. 2024-12-02T15:23:51,714 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportExpiredSnapshot,, stopping at row=testtb-testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-12-02T15:23:51,717 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportExpiredSnapshot 2024-12-02T15:23:51,717 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportExpiredSnapshot,,1733153030710.c5999f0e0ea17043a3e07c6712f1e8f0. 
2024-12-02T15:23:51,717 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-02T15:23:51,719 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportExpiredSnapshot,, stopping at row=testtb-testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-12-02T15:23:51,724 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportExpiredSnapshot,, stopping at row=testtb-testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-12-02T15:23:51,730 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportExpiredSnapshot,, stopping at row=testtb-testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-12-02T15:23:51,733 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } 2024-12-02T15:23:51,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733153031733 (current time:1733153031733). 2024-12-02T15:23:51,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-02T15:23:51,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportExpiredSnapshot VERSION not specified, setting to 2 2024-12-02T15:23:51,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-02T15:23:51,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@61d8bef8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T15:23:51,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] client.ClusterIdFetcher(90): Going to request be0458f36726,34441,-1 for getting cluster id 2024-12-02T15:23:51,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-02T15:23:51,734 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '8e50677f-f1c7-447a-8806-ffbc6b162076' 2024-12-02T15:23:51,734 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-02T15:23:51,734 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "8e50677f-f1c7-447a-8806-ffbc6b162076" 2024-12-02T15:23:51,735 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@40508b5d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, 
bind address=null 2024-12-02T15:23:51,735 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [be0458f36726,34441,-1] 2024-12-02T15:23:51,735 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-02T15:23:51,735 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T15:23:51,736 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42708, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-02T15:23:51,736 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@36bee960, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T15:23:51,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-02T15:23:51,737 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=be0458f36726,45405,1733152773787, seqNum=-1] 2024-12-02T15:23:51,737 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T15:23:51,738 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51912, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T15:23:51,739 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441. 
2024-12-02T15:23:51,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-02T15:23:51,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T15:23:51,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T15:23:51,739 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-02T15:23:51,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@63af4ed5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T15:23:51,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] client.ClusterIdFetcher(90): Going to request be0458f36726,34441,-1 for getting cluster id 2024-12-02T15:23:51,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-02T15:23:51,740 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '8e50677f-f1c7-447a-8806-ffbc6b162076' 2024-12-02T15:23:51,740 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-02T15:23:51,740 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "8e50677f-f1c7-447a-8806-ffbc6b162076" 2024-12-02T15:23:51,740 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3db7946e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T15:23:51,741 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [be0458f36726,34441,-1] 2024-12-02T15:23:51,741 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-02T15:23:51,741 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T15:23:51,741 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42720, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-02T15:23:51,742 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@156aff5b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T15:23:51,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-02T15:23:51,743 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=be0458f36726,45405,1733152773787, seqNum=-1] 2024-12-02T15:23:51,743 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T15:23:51,744 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51924, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T15:23:51,746 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportExpiredSnapshot', locateType=CURRENT is [region=hbase:acl,,1733152776657.82c95700d9ff2efea2507bd84223c4b0., hostname=be0458f36726,43025,1733152773677, seqNum=2] 2024-12-02T15:23:51,746 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T15:23:51,747 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53342, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T15:23:51,748 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441. 
2024-12-02T15:23:51,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor255.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-02T15:23:51,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T15:23:51,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T15:23:51,748 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-02T15:23:51,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportExpiredSnapshot], kv [jenkins: RWXCA] 2024-12-02T15:23:51,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
2024-12-02T15:23:51,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] procedure2.ProcedureExecutor(1139): Stored pid=177, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=177, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } 2024-12-02T15:23:51,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 }, snapshot procedure id = 177 2024-12-02T15:23:51,750 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=177, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=177, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-02T15:23:51,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=177 2024-12-02T15:23:51,751 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=177, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=177, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-02T15:23:51,753 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=177, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=177, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-02T15:23:51,764 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742216_1392 (size=165) 2024-12-02T15:23:51,764 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742216_1392 (size=165) 2024-12-02T15:23:51,764 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742216_1392 (size=165) 2024-12-02T15:23:51,766 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=177, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=177, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-02T15:23:51,766 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=178, ppid=177, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure c5999f0e0ea17043a3e07c6712f1e8f0}, {pid=179, ppid=177, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 87e205a09dc0f6af36a7f0bac5bd0ba3}] 2024-12-02T15:23:51,767 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=178, ppid=177, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure c5999f0e0ea17043a3e07c6712f1e8f0 2024-12-02T15:23:51,767 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=179, ppid=177, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 87e205a09dc0f6af36a7f0bac5bd0ba3 2024-12-02T15:23:51,853 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=177 2024-12-02T15:23:51,919 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43025 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=179 2024-12-02T15:23:51,919 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35155 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=178 2024-12-02T15:23:51,920 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportExpiredSnapshot,,1733153030710.c5999f0e0ea17043a3e07c6712f1e8f0. 2024-12-02T15:23:51,920 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportExpiredSnapshot,1,1733153030710.87e205a09dc0f6af36a7f0bac5bd0ba3. 2024-12-02T15:23:51,921 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] regionserver.HRegion(2902): Flushing c5999f0e0ea17043a3e07c6712f1e8f0 1/1 column families, dataSize=132 B heapSize=544 B 2024-12-02T15:23:51,921 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] regionserver.HRegion(2902): Flushing 87e205a09dc0f6af36a7f0bac5bd0ba3 1/1 column families, dataSize=3.13 KB heapSize=7 KB 2024-12-02T15:23:51,939 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportExpiredSnapshot/87e205a09dc0f6af36a7f0bac5bd0ba3/.tmp/cf/1fe2ab85a78f4c9eab56043b1c2aeafa is 71, key is 102187d0176f915284e7e8ee3c17af80/cf:q/1733153031713/Put/seqid=0 2024-12-02T15:23:51,939 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportExpiredSnapshot/c5999f0e0ea17043a3e07c6712f1e8f0/.tmp/cf/26a535b33fa54481a865ddcc4c148bce is 71, key is 06e4a80005ed4333ac4925befae41f9a/cf:q/1733153031710/Put/seqid=0 2024-12-02T15:23:51,945 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742217_1393 (size=8392) 2024-12-02T15:23:51,945 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742218_1394 (size=5216) 2024-12-02T15:23:51,945 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742218_1394 (size=5216) 2024-12-02T15:23:51,945 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742217_1393 (size=8392) 2024-12-02T15:23:51,945 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742217_1393 (size=8392) 2024-12-02T15:23:51,946 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742218_1394 (size=5216) 2024-12-02T15:23:51,946 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=132 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportExpiredSnapshot/c5999f0e0ea17043a3e07c6712f1e8f0/.tmp/cf/26a535b33fa54481a865ddcc4c148bce 2024-12-02T15:23:51,950 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportExpiredSnapshot/c5999f0e0ea17043a3e07c6712f1e8f0/.tmp/cf/26a535b33fa54481a865ddcc4c148bce as hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportExpiredSnapshot/c5999f0e0ea17043a3e07c6712f1e8f0/cf/26a535b33fa54481a865ddcc4c148bce 2024-12-02T15:23:51,951 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.13 KB at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportExpiredSnapshot/87e205a09dc0f6af36a7f0bac5bd0ba3/.tmp/cf/1fe2ab85a78f4c9eab56043b1c2aeafa 2024-12-02T15:23:51,954 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportExpiredSnapshot/c5999f0e0ea17043a3e07c6712f1e8f0/cf/26a535b33fa54481a865ddcc4c148bce, entries=2, sequenceid=6, filesize=5.1 K 2024-12-02T15:23:51,955 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] regionserver.HRegion(3140): Finished flush of dataSize ~132 B/132, heapSize ~528 B/528, currentSize=0 B/0 for c5999f0e0ea17043a3e07c6712f1e8f0 in 35ms, sequenceid=6, compaction requested=false 2024-12-02T15:23:51,955 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportExpiredSnapshot' 2024-12-02T15:23:51,955 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] regionserver.HRegion(2603): Flush status journal for c5999f0e0ea17043a3e07c6712f1e8f0: 2024-12-02T15:23:51,955 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportExpiredSnapshot,,1733153030710.c5999f0e0ea17043a3e07c6712f1e8f0. for snaptb0-testExportExpiredSnapshot completed. 2024-12-02T15:23:51,956 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportExpiredSnapshot,,1733153030710.c5999f0e0ea17043a3e07c6712f1e8f0.' 
region-info for snapshot=snaptb0-testExportExpiredSnapshot 2024-12-02T15:23:51,956 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-02T15:23:51,956 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportExpiredSnapshot/c5999f0e0ea17043a3e07c6712f1e8f0/cf/26a535b33fa54481a865ddcc4c148bce] hfiles 2024-12-02T15:23:51,956 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportExpiredSnapshot/87e205a09dc0f6af36a7f0bac5bd0ba3/.tmp/cf/1fe2ab85a78f4c9eab56043b1c2aeafa as hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportExpiredSnapshot/87e205a09dc0f6af36a7f0bac5bd0ba3/cf/1fe2ab85a78f4c9eab56043b1c2aeafa 2024-12-02T15:23:51,956 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportExpiredSnapshot/c5999f0e0ea17043a3e07c6712f1e8f0/cf/26a535b33fa54481a865ddcc4c148bce for snapshot=snaptb0-testExportExpiredSnapshot 2024-12-02T15:23:51,960 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportExpiredSnapshot/87e205a09dc0f6af36a7f0bac5bd0ba3/cf/1fe2ab85a78f4c9eab56043b1c2aeafa, entries=48, sequenceid=6, filesize=8.2 K 2024-12-02T15:23:51,961 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] regionserver.HRegion(3140): Finished flush of dataSize ~3.13 KB/3204, heapSize ~6.98 KB/7152, currentSize=0 B/0 for 87e205a09dc0f6af36a7f0bac5bd0ba3 in 41ms, sequenceid=6, compaction requested=false 2024-12-02T15:23:51,961 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] regionserver.HRegion(2603): Flush status journal for 87e205a09dc0f6af36a7f0bac5bd0ba3: 2024-12-02T15:23:51,961 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportExpiredSnapshot,1,1733153030710.87e205a09dc0f6af36a7f0bac5bd0ba3. for snaptb0-testExportExpiredSnapshot completed. 2024-12-02T15:23:51,961 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportExpiredSnapshot,1,1733153030710.87e205a09dc0f6af36a7f0bac5bd0ba3.' 
region-info for snapshot=snaptb0-testExportExpiredSnapshot 2024-12-02T15:23:51,962 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-02T15:23:51,962 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportExpiredSnapshot/87e205a09dc0f6af36a7f0bac5bd0ba3/cf/1fe2ab85a78f4c9eab56043b1c2aeafa] hfiles 2024-12-02T15:23:51,962 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportExpiredSnapshot/87e205a09dc0f6af36a7f0bac5bd0ba3/cf/1fe2ab85a78f4c9eab56043b1c2aeafa for snapshot=snaptb0-testExportExpiredSnapshot 2024-12-02T15:23:51,964 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742219_1395 (size=110) 2024-12-02T15:23:51,964 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742219_1395 (size=110) 2024-12-02T15:23:51,965 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742219_1395 (size=110) 2024-12-02T15:23:51,965 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportExpiredSnapshot,,1733153030710.c5999f0e0ea17043a3e07c6712f1e8f0. 
2024-12-02T15:23:51,965 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=178 2024-12-02T15:23:51,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] master.HMaster(4169): Remote procedure done, pid=178 2024-12-02T15:23:51,966 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportExpiredSnapshot on region c5999f0e0ea17043a3e07c6712f1e8f0 2024-12-02T15:23:51,966 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=178, ppid=177, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure c5999f0e0ea17043a3e07c6712f1e8f0 2024-12-02T15:23:51,967 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742220_1396 (size=110) 2024-12-02T15:23:51,968 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742220_1396 (size=110) 2024-12-02T15:23:51,968 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742220_1396 (size=110) 2024-12-02T15:23:51,968 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=178, ppid=177, state=SUCCESS, hasLock=false; SnapshotRegionProcedure c5999f0e0ea17043a3e07c6712f1e8f0 in 200 msec 2024-12-02T15:23:51,968 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportExpiredSnapshot,1,1733153030710.87e205a09dc0f6af36a7f0bac5bd0ba3. 
2024-12-02T15:23:51,968 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=179 2024-12-02T15:23:51,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] master.HMaster(4169): Remote procedure done, pid=179 2024-12-02T15:23:51,969 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportExpiredSnapshot on region 87e205a09dc0f6af36a7f0bac5bd0ba3 2024-12-02T15:23:51,969 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=179, ppid=177, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 87e205a09dc0f6af36a7f0bac5bd0ba3 2024-12-02T15:23:51,971 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=179, resume processing ppid=177 2024-12-02T15:23:51,971 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=179, ppid=177, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 87e205a09dc0f6af36a7f0bac5bd0ba3 in 203 msec 2024-12-02T15:23:51,971 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=177, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=177, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-02T15:23:51,971 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=177, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=177, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-02T15:23:51,972 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=177, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=177, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-02T15:23:51,972 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportExpiredSnapshot 2024-12-02T15:23:51,972 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/.hbase-snapshot/.tmp/snaptb0-testExportExpiredSnapshot 2024-12-02T15:23:51,983 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742221_1397 (size=630) 2024-12-02T15:23:51,983 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742221_1397 (size=630) 2024-12-02T15:23:51,983 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742221_1397 (size=630) 2024-12-02T15:23:51,986 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=177, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=177, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-02T15:23:51,990 INFO 
[PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=177, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=177, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-02T15:23:51,990 DEBUG [PEWorker-4 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/.hbase-snapshot/.tmp/snaptb0-testExportExpiredSnapshot to hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/.hbase-snapshot/snaptb0-testExportExpiredSnapshot 2024-12-02T15:23:51,992 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=177, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=177, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-02T15:23:51,992 DEBUG [PEWorker-4 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 }, snapshot procedure id = 177 2024-12-02T15:23:51,993 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=177, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=177, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } in 243 msec 2024-12-02T15:23:52,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=177 2024-12-02T15:23:52,063 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportExpiredSnapshot completed 2024-12-02T15:23:52,065 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testExportExpiredSnapshot', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-02T15:23:52,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] procedure2.ProcedureExecutor(1139): Stored pid=180, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testExportExpiredSnapshot 2024-12-02T15:23:52,068 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=180, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testExportExpiredSnapshot execute state=CREATE_TABLE_PRE_OPERATION 2024-12-02T15:23:52,068 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T15:23:52,068 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testExportExpiredSnapshot" procId is: 180 2024-12-02T15:23:52,069 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=180 2024-12-02T15:23:52,069 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=180, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testExportExpiredSnapshot execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-02T15:23:52,079 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742222_1398 (size=400) 2024-12-02T15:23:52,079 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742222_1398 (size=400) 2024-12-02T15:23:52,079 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742222_1398 (size=400) 2024-12-02T15:23:52,081 INFO [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => a07dd0e3653e17be2e0e596d280713d4, NAME => 'testExportExpiredSnapshot,,1733153032065.a07dd0e3653e17be2e0e596d280713d4.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testExportExpiredSnapshot', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657 2024-12-02T15:23:52,082 INFO [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => 9d2126893e3de75703fde3d34993d9c3, NAME => 'testExportExpiredSnapshot,1,1733153032065.9d2126893e3de75703fde3d34993d9c3.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testExportExpiredSnapshot', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657 2024-12-02T15:23:52,092 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742224_1400 (size=61) 2024-12-02T15:23:52,092 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742224_1400 (size=61) 2024-12-02T15:23:52,093 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742224_1400 (size=61) 2024-12-02T15:23:52,094 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(898): Instantiated testExportExpiredSnapshot,,1733153032065.a07dd0e3653e17be2e0e596d280713d4.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T15:23:52,094 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] 
regionserver.HRegion(1722): Closing a07dd0e3653e17be2e0e596d280713d4, disabling compactions & flushes 2024-12-02T15:23:52,094 INFO [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1755): Closing region testExportExpiredSnapshot,,1733153032065.a07dd0e3653e17be2e0e596d280713d4. 2024-12-02T15:23:52,094 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742223_1399 (size=61) 2024-12-02T15:23:52,094 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testExportExpiredSnapshot,,1733153032065.a07dd0e3653e17be2e0e596d280713d4. 2024-12-02T15:23:52,094 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testExportExpiredSnapshot,,1733153032065.a07dd0e3653e17be2e0e596d280713d4. after waiting 0 ms 2024-12-02T15:23:52,094 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testExportExpiredSnapshot,,1733153032065.a07dd0e3653e17be2e0e596d280713d4. 2024-12-02T15:23:52,094 INFO [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1973): Closed testExportExpiredSnapshot,,1733153032065.a07dd0e3653e17be2e0e596d280713d4. 2024-12-02T15:23:52,094 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1676): Region close journal for a07dd0e3653e17be2e0e596d280713d4: Waiting for close lock at 1733153032094Disabling compacts and flushes for region at 1733153032094Disabling writes for close at 1733153032094Writing region close event to WAL at 1733153032094Closed at 1733153032094 2024-12-02T15:23:52,094 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742223_1399 (size=61) 2024-12-02T15:23:52,094 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742223_1399 (size=61) 2024-12-02T15:23:52,095 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(898): Instantiated testExportExpiredSnapshot,1,1733153032065.9d2126893e3de75703fde3d34993d9c3.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T15:23:52,095 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1722): Closing 9d2126893e3de75703fde3d34993d9c3, disabling compactions & flushes 2024-12-02T15:23:52,095 INFO [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1755): Closing region testExportExpiredSnapshot,1,1733153032065.9d2126893e3de75703fde3d34993d9c3. 2024-12-02T15:23:52,095 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testExportExpiredSnapshot,1,1733153032065.9d2126893e3de75703fde3d34993d9c3. 2024-12-02T15:23:52,095 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testExportExpiredSnapshot,1,1733153032065.9d2126893e3de75703fde3d34993d9c3. after waiting 0 ms 2024-12-02T15:23:52,095 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testExportExpiredSnapshot,1,1733153032065.9d2126893e3de75703fde3d34993d9c3. 
2024-12-02T15:23:52,095 INFO [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1973): Closed testExportExpiredSnapshot,1,1733153032065.9d2126893e3de75703fde3d34993d9c3. 2024-12-02T15:23:52,095 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1676): Region close journal for 9d2126893e3de75703fde3d34993d9c3: Waiting for close lock at 1733153032095Disabling compacts and flushes for region at 1733153032095Disabling writes for close at 1733153032095Writing region close event to WAL at 1733153032095Closed at 1733153032095 2024-12-02T15:23:52,096 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=180, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testExportExpiredSnapshot execute state=CREATE_TABLE_ADD_TO_META 2024-12-02T15:23:52,097 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testExportExpiredSnapshot,,1733153032065.a07dd0e3653e17be2e0e596d280713d4.","families":{"info":[{"qualifier":"regioninfo","vlen":60,"tag":[],"timestamp":"1733153032096"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733153032096"}]},"ts":"1733153032096"} 2024-12-02T15:23:52,097 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testExportExpiredSnapshot,1,1733153032065.9d2126893e3de75703fde3d34993d9c3.","families":{"info":[{"qualifier":"regioninfo","vlen":60,"tag":[],"timestamp":"1733153032096"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733153032096"}]},"ts":"1733153032096"} 2024-12-02T15:23:52,099 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-12-02T15:23:52,099 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=180, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testExportExpiredSnapshot execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-02T15:23:52,100 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733153032099"}]},"ts":"1733153032099"} 2024-12-02T15:23:52,101 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportExpiredSnapshot, state=ENABLING in hbase:meta 2024-12-02T15:23:52,101 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(204): Hosts are {be0458f36726=0} racks are {/default-rack=0} 2024-12-02T15:23:52,102 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-02T15:23:52,102 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-02T15:23:52,102 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-02T15:23:52,102 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-02T15:23:52,102 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-02T15:23:52,102 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-02T15:23:52,102 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-02T15:23:52,102 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-02T15:23:52,102 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-02T15:23:52,102 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-02T15:23:52,102 INFO [PEWorker-2 {}] 
procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=181, ppid=180, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=a07dd0e3653e17be2e0e596d280713d4, ASSIGN}, {pid=182, ppid=180, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=9d2126893e3de75703fde3d34993d9c3, ASSIGN}] 2024-12-02T15:23:52,103 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=181, ppid=180, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=a07dd0e3653e17be2e0e596d280713d4, ASSIGN 2024-12-02T15:23:52,103 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=182, ppid=180, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=9d2126893e3de75703fde3d34993d9c3, ASSIGN 2024-12-02T15:23:52,104 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(269): Starting pid=181, ppid=180, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=a07dd0e3653e17be2e0e596d280713d4, ASSIGN; state=OFFLINE, location=be0458f36726,35155,1733152773848; forceNewPlan=false, retain=false 2024-12-02T15:23:52,104 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=182, ppid=180, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=9d2126893e3de75703fde3d34993d9c3, ASSIGN; state=OFFLINE, location=be0458f36726,45405,1733152773787; forceNewPlan=false, retain=false 2024-12-02T15:23:52,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=180 2024-12-02T15:23:52,255 INFO [be0458f36726:34441 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
2024-12-02T15:23:52,256 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=182 updating hbase:meta row=9d2126893e3de75703fde3d34993d9c3, regionState=OPENING, regionLocation=be0458f36726,45405,1733152773787 2024-12-02T15:23:52,256 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=181 updating hbase:meta row=a07dd0e3653e17be2e0e596d280713d4, regionState=OPENING, regionLocation=be0458f36726,35155,1733152773848 2024-12-02T15:23:52,260 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=182, ppid=180, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=9d2126893e3de75703fde3d34993d9c3, ASSIGN because future has completed 2024-12-02T15:23:52,261 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=183, ppid=182, state=RUNNABLE, hasLock=false; OpenRegionProcedure 9d2126893e3de75703fde3d34993d9c3, server=be0458f36726,45405,1733152773787}] 2024-12-02T15:23:52,263 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=181, ppid=180, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=a07dd0e3653e17be2e0e596d280713d4, ASSIGN because future has completed 2024-12-02T15:23:52,264 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=184, ppid=181, state=RUNNABLE, hasLock=false; OpenRegionProcedure a07dd0e3653e17be2e0e596d280713d4, server=be0458f36726,35155,1733152773848}] 2024-12-02T15:23:52,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=180 2024-12-02T15:23:52,421 INFO [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] handler.AssignRegionHandler(132): Open testExportExpiredSnapshot,1,1733153032065.9d2126893e3de75703fde3d34993d9c3. 2024-12-02T15:23:52,422 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(7752): Opening region: {ENCODED => 9d2126893e3de75703fde3d34993d9c3, NAME => 'testExportExpiredSnapshot,1,1733153032065.9d2126893e3de75703fde3d34993d9c3.', STARTKEY => '1', ENDKEY => ''} 2024-12-02T15:23:52,422 INFO [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] handler.AssignRegionHandler(132): Open testExportExpiredSnapshot,,1733153032065.a07dd0e3653e17be2e0e596d280713d4. 2024-12-02T15:23:52,422 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(7752): Opening region: {ENCODED => a07dd0e3653e17be2e0e596d280713d4, NAME => 'testExportExpiredSnapshot,,1733153032065.a07dd0e3653e17be2e0e596d280713d4.', STARTKEY => '', ENDKEY => '1'} 2024-12-02T15:23:52,422 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(8280): Registered coprocessor service: region=testExportExpiredSnapshot,1,1733153032065.9d2126893e3de75703fde3d34993d9c3. service=AccessControlService 2024-12-02T15:23:52,423 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(8280): Registered coprocessor service: region=testExportExpiredSnapshot,,1733153032065.a07dd0e3653e17be2e0e596d280713d4. 
service=AccessControlService 2024-12-02T15:23:52,423 INFO [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-02T15:23:52,423 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testExportExpiredSnapshot 9d2126893e3de75703fde3d34993d9c3 2024-12-02T15:23:52,423 INFO [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-02T15:23:52,423 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(898): Instantiated testExportExpiredSnapshot,1,1733153032065.9d2126893e3de75703fde3d34993d9c3.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T15:23:52,423 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(7794): checking encryption for 9d2126893e3de75703fde3d34993d9c3 2024-12-02T15:23:52,423 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(7797): checking classloading for 9d2126893e3de75703fde3d34993d9c3 2024-12-02T15:23:52,423 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testExportExpiredSnapshot a07dd0e3653e17be2e0e596d280713d4 2024-12-02T15:23:52,424 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(898): Instantiated testExportExpiredSnapshot,,1733153032065.a07dd0e3653e17be2e0e596d280713d4.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T15:23:52,424 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(7794): checking encryption for a07dd0e3653e17be2e0e596d280713d4 2024-12-02T15:23:52,424 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(7797): checking classloading for a07dd0e3653e17be2e0e596d280713d4 2024-12-02T15:23:52,425 INFO [StoreOpener-9d2126893e3de75703fde3d34993d9c3-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 9d2126893e3de75703fde3d34993d9c3 2024-12-02T15:23:52,425 INFO [StoreOpener-a07dd0e3653e17be2e0e596d280713d4-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region a07dd0e3653e17be2e0e596d280713d4 2024-12-02T15:23:52,426 INFO [StoreOpener-a07dd0e3653e17be2e0e596d280713d4-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, 
maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region a07dd0e3653e17be2e0e596d280713d4 columnFamilyName cf 2024-12-02T15:23:52,426 INFO [StoreOpener-9d2126893e3de75703fde3d34993d9c3-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 9d2126893e3de75703fde3d34993d9c3 columnFamilyName cf 2024-12-02T15:23:52,427 DEBUG [StoreOpener-a07dd0e3653e17be2e0e596d280713d4-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T15:23:52,427 DEBUG [StoreOpener-9d2126893e3de75703fde3d34993d9c3-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T15:23:52,427 INFO [StoreOpener-9d2126893e3de75703fde3d34993d9c3-1 {}] regionserver.HStore(327): Store=9d2126893e3de75703fde3d34993d9c3/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T15:23:52,427 INFO [StoreOpener-a07dd0e3653e17be2e0e596d280713d4-1 {}] regionserver.HStore(327): Store=a07dd0e3653e17be2e0e596d280713d4/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T15:23:52,427 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(1038): replaying wal for 9d2126893e3de75703fde3d34993d9c3 2024-12-02T15:23:52,427 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(1038): replaying wal for a07dd0e3653e17be2e0e596d280713d4 2024-12-02T15:23:52,428 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testExportExpiredSnapshot/a07dd0e3653e17be2e0e596d280713d4 2024-12-02T15:23:52,428 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testExportExpiredSnapshot/9d2126893e3de75703fde3d34993d9c3 2024-12-02T15:23:52,428 DEBUG 
[RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testExportExpiredSnapshot/a07dd0e3653e17be2e0e596d280713d4 2024-12-02T15:23:52,428 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testExportExpiredSnapshot/9d2126893e3de75703fde3d34993d9c3 2024-12-02T15:23:52,429 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(1048): stopping wal replay for 9d2126893e3de75703fde3d34993d9c3 2024-12-02T15:23:52,429 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(1060): Cleaning up temporary data for 9d2126893e3de75703fde3d34993d9c3 2024-12-02T15:23:52,429 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(1048): stopping wal replay for a07dd0e3653e17be2e0e596d280713d4 2024-12-02T15:23:52,429 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(1060): Cleaning up temporary data for a07dd0e3653e17be2e0e596d280713d4 2024-12-02T15:23:52,430 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(1093): writing seq id for 9d2126893e3de75703fde3d34993d9c3 2024-12-02T15:23:52,430 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(1093): writing seq id for a07dd0e3653e17be2e0e596d280713d4 2024-12-02T15:23:52,432 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testExportExpiredSnapshot/9d2126893e3de75703fde3d34993d9c3/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-02T15:23:52,432 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testExportExpiredSnapshot/a07dd0e3653e17be2e0e596d280713d4/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-02T15:23:52,433 INFO [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(1114): Opened 9d2126893e3de75703fde3d34993d9c3; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=71916090, jitterRate=0.0716332495212555}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-02T15:23:52,433 INFO [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(1114): Opened a07dd0e3653e17be2e0e596d280713d4; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=72677414, jitterRate=0.08297786116600037}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-02T15:23:52,433 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] 
regionserver.HRegion(1122): Running coprocessor post-open hooks for 9d2126893e3de75703fde3d34993d9c3 2024-12-02T15:23:52,433 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(1122): Running coprocessor post-open hooks for a07dd0e3653e17be2e0e596d280713d4 2024-12-02T15:23:52,433 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(1006): Region open journal for 9d2126893e3de75703fde3d34993d9c3: Running coprocessor pre-open hook at 1733153032424Writing region info on filesystem at 1733153032424Initializing all the Stores at 1733153032425 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733153032425Cleaning up temporary data from old regions at 1733153032429 (+4 ms)Running coprocessor post-open hooks at 1733153032433 (+4 ms)Region opened successfully at 1733153032433 2024-12-02T15:23:52,433 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(1006): Region open journal for a07dd0e3653e17be2e0e596d280713d4: Running coprocessor pre-open hook at 1733153032424Writing region info on filesystem at 1733153032424Initializing all the Stores at 1733153032425 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733153032425Cleaning up temporary data from old regions at 1733153032429 (+4 ms)Running coprocessor post-open hooks at 1733153032433 (+4 ms)Region opened successfully at 1733153032433 2024-12-02T15:23:52,434 INFO [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegionServer(2236): Post open deploy tasks for testExportExpiredSnapshot,,1733153032065.a07dd0e3653e17be2e0e596d280713d4., pid=184, masterSystemTime=1733153032416 2024-12-02T15:23:52,434 INFO [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegionServer(2236): Post open deploy tasks for testExportExpiredSnapshot,1,1733153032065.9d2126893e3de75703fde3d34993d9c3., pid=183, masterSystemTime=1733153032415 2024-12-02T15:23:52,436 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegionServer(2266): Finished post open deploy task for testExportExpiredSnapshot,1,1733153032065.9d2126893e3de75703fde3d34993d9c3. 2024-12-02T15:23:52,436 INFO [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] handler.AssignRegionHandler(153): Opened testExportExpiredSnapshot,1,1733153032065.9d2126893e3de75703fde3d34993d9c3. 
2024-12-02T15:23:52,436 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=182 updating hbase:meta row=9d2126893e3de75703fde3d34993d9c3, regionState=OPEN, openSeqNum=2, regionLocation=be0458f36726,45405,1733152773787 2024-12-02T15:23:52,437 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegionServer(2266): Finished post open deploy task for testExportExpiredSnapshot,,1733153032065.a07dd0e3653e17be2e0e596d280713d4. 2024-12-02T15:23:52,437 INFO [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] handler.AssignRegionHandler(153): Opened testExportExpiredSnapshot,,1733153032065.a07dd0e3653e17be2e0e596d280713d4. 2024-12-02T15:23:52,437 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=181 updating hbase:meta row=a07dd0e3653e17be2e0e596d280713d4, regionState=OPEN, openSeqNum=2, regionLocation=be0458f36726,35155,1733152773848 2024-12-02T15:23:52,438 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=183, ppid=182, state=RUNNABLE, hasLock=false; OpenRegionProcedure 9d2126893e3de75703fde3d34993d9c3, server=be0458f36726,45405,1733152773787 because future has completed 2024-12-02T15:23:52,439 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=184, ppid=181, state=RUNNABLE, hasLock=false; OpenRegionProcedure a07dd0e3653e17be2e0e596d280713d4, server=be0458f36726,35155,1733152773848 because future has completed 2024-12-02T15:23:52,441 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=183, resume processing ppid=182 2024-12-02T15:23:52,441 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=183, ppid=182, state=SUCCESS, hasLock=false; OpenRegionProcedure 9d2126893e3de75703fde3d34993d9c3, server=be0458f36726,45405,1733152773787 in 178 msec 2024-12-02T15:23:52,442 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=184, resume processing ppid=181 2024-12-02T15:23:52,442 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=184, ppid=181, state=SUCCESS, hasLock=false; OpenRegionProcedure a07dd0e3653e17be2e0e596d280713d4, server=be0458f36726,35155,1733152773848 in 176 msec 2024-12-02T15:23:52,443 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=182, ppid=180, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=9d2126893e3de75703fde3d34993d9c3, ASSIGN in 339 msec 2024-12-02T15:23:52,444 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=181, resume processing ppid=180 2024-12-02T15:23:52,444 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=181, ppid=180, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=a07dd0e3653e17be2e0e596d280713d4, ASSIGN in 340 msec 2024-12-02T15:23:52,445 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=180, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testExportExpiredSnapshot execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-02T15:23:52,445 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733153032445"}]},"ts":"1733153032445"} 2024-12-02T15:23:52,447 INFO [PEWorker-2 {}] 
hbase.MetaTableAccessor(843): Updated tableName=testExportExpiredSnapshot, state=ENABLED in hbase:meta 2024-12-02T15:23:52,448 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=180, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testExportExpiredSnapshot execute state=CREATE_TABLE_POST_OPERATION 2024-12-02T15:23:52,448 DEBUG [PEWorker-2 {}] access.PermissionStorage(177): Writing permission with rowKey testExportExpiredSnapshot jenkins: RWXCA 2024-12-02T15:23:52,450 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43025 {}] access.PermissionStorage(613): Read acl: entry[testExportExpiredSnapshot], kv [jenkins: RWXCA] 2024-12-02T15:23:52,481 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35155-0x10197ce54a90003, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T15:23:52,481 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34441-0x10197ce54a90000, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T15:23:52,481 DEBUG [pool-69-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45405-0x10197ce54a90002, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T15:23:52,481 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43025-0x10197ce54a90001, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T15:23:52,518 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-02T15:23:52,518 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-02T15:23:52,518 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-02T15:23:52,518 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-02T15:23:52,519 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-02T15:23:52,519 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-02T15:23:52,519 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating 
permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-02T15:23:52,520 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-02T15:23:52,521 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=180, state=SUCCESS, hasLock=false; CreateTableProcedure table=testExportExpiredSnapshot in 450 msec 2024-12-02T15:23:52,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=180 2024-12-02T15:23:52,694 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testExportExpiredSnapshot completed 2024-12-02T15:23:52,695 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table testExportExpiredSnapshot get assigned. Timeout = 60000ms 2024-12-02T15:23:52,695 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-02T15:23:52,703 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table testExportExpiredSnapshot assigned to meta. Checking AM states. 2024-12-02T15:23:52,704 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-02T15:23:52,704 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table testExportExpiredSnapshot assigned. 2024-12-02T15:23:52,704 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportExpiredSnapshot,, stopping at row=testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-12-02T15:23:52,714 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testExportExpiredSnapshot', row='02beb5a8f8b94e65a72475a77d4d01fa1', locateType=CURRENT is [region=testExportExpiredSnapshot,,1733153032065.a07dd0e3653e17be2e0e596d280713d4., hostname=be0458f36726,35155,1733152773848, seqNum=2] 2024-12-02T15:23:52,715 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testExportExpiredSnapshot', row='1f9e792d730861ef6765b532d7fe20086', locateType=CURRENT is [region=testExportExpiredSnapshot,1,1733153032065.9d2126893e3de75703fde3d34993d9c3., hostname=be0458f36726,45405,1733152773787, seqNum=2] 2024-12-02T15:23:52,716 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testExportExpiredSnapshot', row='234cd46e02c7ef0029436098fb40760fc', locateType=CURRENT is [region=testExportExpiredSnapshot,1,1733153032065.9d2126893e3de75703fde3d34993d9c3., hostname=be0458f36726,45405,1733152773787, seqNum=2] 2024-12-02T15:23:52,717 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testExportExpiredSnapshot', row='302df2f436406564b39958f4e831fa47f', locateType=CURRENT is [region=testExportExpiredSnapshot,1,1733153032065.9d2126893e3de75703fde3d34993d9c3., hostname=be0458f36726,45405,1733152773787, seqNum=2] 2024-12-02T15:23:52,724 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35155 {}] regionserver.HRegion(8528): 
writing data to region testExportExpiredSnapshot,,1733153032065.a07dd0e3653e17be2e0e596d280713d4. with WAL disabled. Data may be lost in the event of a crash. 2024-12-02T15:23:52,725 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45405 {}] regionserver.HRegion(8528): writing data to region testExportExpiredSnapshot,1,1733153032065.9d2126893e3de75703fde3d34993d9c3. with WAL disabled. Data may be lost in the event of a crash. 2024-12-02T15:23:52,726 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportExpiredSnapshot,, stopping at row=testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-12-02T15:23:52,728 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testExportExpiredSnapshot 2024-12-02T15:23:52,728 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testExportExpiredSnapshot,,1733153032065.a07dd0e3653e17be2e0e596d280713d4. 2024-12-02T15:23:52,729 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-02T15:23:52,730 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportExpiredSnapshot,, stopping at row=testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-12-02T15:23:52,734 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportExpiredSnapshot,, stopping at row=testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-12-02T15:23:52,739 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } 2024-12-02T15:23:52,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snapshot-testExportExpiredSnapshot VERSION not specified, setting to 2 2024-12-02T15:23:52,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-02T15:23:52,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5c41a353, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T15:23:52,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] client.ClusterIdFetcher(90): Going to request be0458f36726,34441,-1 for getting cluster id 2024-12-02T15:23:52,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-02T15:23:52,741 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '8e50677f-f1c7-447a-8806-ffbc6b162076' 2024-12-02T15:23:52,741 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-02T15:23:52,741 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "8e50677f-f1c7-447a-8806-ffbc6b162076" 2024-12-02T15:23:52,741 DEBUG 
[MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3ab4d45, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T15:23:52,741 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [be0458f36726,34441,-1] 2024-12-02T15:23:52,742 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-02T15:23:52,742 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T15:23:52,742 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42728, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-02T15:23:52,743 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@397ad5f3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T15:23:52,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-02T15:23:52,744 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=be0458f36726,45405,1733152773787, seqNum=-1] 2024-12-02T15:23:52,744 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T15:23:52,745 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51936, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T15:23:52,746 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441. 
2024-12-02T15:23:52,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-02T15:23:52,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T15:23:52,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T15:23:52,746 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-02T15:23:52,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5fae22ac, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T15:23:52,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] client.ClusterIdFetcher(90): Going to request be0458f36726,34441,-1 for getting cluster id 2024-12-02T15:23:52,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-02T15:23:52,747 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '8e50677f-f1c7-447a-8806-ffbc6b162076' 2024-12-02T15:23:52,747 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-02T15:23:52,747 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "8e50677f-f1c7-447a-8806-ffbc6b162076" 2024-12-02T15:23:52,747 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7f5be683, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T15:23:52,747 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [be0458f36726,34441,-1] 2024-12-02T15:23:52,748 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-02T15:23:52,748 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T15:23:52,748 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42744, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-02T15:23:52,749 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4694451d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T15:23:52,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-02T15:23:52,750 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=be0458f36726,45405,1733152773787, seqNum=-1] 2024-12-02T15:23:52,750 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T15:23:52,751 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51942, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T15:23:52,752 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testExportExpiredSnapshot', locateType=CURRENT is [region=hbase:acl,,1733152776657.82c95700d9ff2efea2507bd84223c4b0., hostname=be0458f36726,43025,1733152773677, seqNum=2] 2024-12-02T15:23:52,752 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T15:23:52,753 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53348, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T15:23:52,753 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441. 
2024-12-02T15:23:52,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor255.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-02T15:23:52,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T15:23:52,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T15:23:52,754 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-02T15:23:52,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] access.PermissionStorage(613): Read acl: entry[testExportExpiredSnapshot], kv [jenkins: RWXCA] 2024-12-02T15:23:52,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
2024-12-02T15:23:52,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] procedure2.ProcedureExecutor(1139): Stored pid=185, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } 2024-12-02T15:23:52,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 }, snapshot procedure id = 185 2024-12-02T15:23:52,756 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_PREPARE 2024-12-02T15:23:52,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=185 2024-12-02T15:23:52,757 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-02T15:23:52,760 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-02T15:23:52,765 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742225_1401 (size=152) 2024-12-02T15:23:52,765 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742225_1401 (size=152) 2024-12-02T15:23:52,765 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742225_1401 (size=152) 2024-12-02T15:23:52,766 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-02T15:23:52,766 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=186, ppid=185, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure a07dd0e3653e17be2e0e596d280713d4}, {pid=187, ppid=185, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 9d2126893e3de75703fde3d34993d9c3}] 2024-12-02T15:23:52,767 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=186, ppid=185, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure a07dd0e3653e17be2e0e596d280713d4 2024-12-02T15:23:52,767 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=187, ppid=185, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 9d2126893e3de75703fde3d34993d9c3 2024-12-02T15:23:52,862 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=185 2024-12-02T15:23:52,919 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45405 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=187 2024-12-02T15:23:52,919 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35155 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=186 2024-12-02T15:23:52,919 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testExportExpiredSnapshot,1,1733153032065.9d2126893e3de75703fde3d34993d9c3. 2024-12-02T15:23:52,919 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testExportExpiredSnapshot,,1733153032065.a07dd0e3653e17be2e0e596d280713d4. 2024-12-02T15:23:52,919 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.HRegion(2902): Flushing a07dd0e3653e17be2e0e596d280713d4 1/1 column families, dataSize=266 B heapSize=832 B 2024-12-02T15:23:52,919 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.HRegion(2902): Flushing 9d2126893e3de75703fde3d34993d9c3 1/1 column families, dataSize=3.00 KB heapSize=6.72 KB 2024-12-02T15:23:52,942 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testExportExpiredSnapshot/a07dd0e3653e17be2e0e596d280713d4/.tmp/cf/124612b69e3a4944bf78466738b03511 is 71, key is 02e8b380188d871328a68953f61ac950/cf:q/1733153032724/Put/seqid=0 2024-12-02T15:23:52,942 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testExportExpiredSnapshot/9d2126893e3de75703fde3d34993d9c3/.tmp/cf/44235b2da3644daab87e9d318e589588 is 71, key is 13be1808fffdc686b493832218aec48d/cf:q/1733153032725/Put/seqid=0 2024-12-02T15:23:52,946 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742226_1402 (size=8256) 2024-12-02T15:23:52,946 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742226_1402 (size=8256) 2024-12-02T15:23:52,947 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742227_1403 (size=5356) 2024-12-02T15:23:52,947 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742227_1403 (size=5356) 2024-12-02T15:23:52,947 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742227_1403 (size=5356) 2024-12-02T15:23:52,947 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:41939 is added to blk_1073742226_1402 (size=8256) 2024-12-02T15:23:52,947 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.00 KB at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testExportExpiredSnapshot/9d2126893e3de75703fde3d34993d9c3/.tmp/cf/44235b2da3644daab87e9d318e589588 2024-12-02T15:23:52,947 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=266 B at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testExportExpiredSnapshot/a07dd0e3653e17be2e0e596d280713d4/.tmp/cf/124612b69e3a4944bf78466738b03511 2024-12-02T15:23:52,952 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testExportExpiredSnapshot/a07dd0e3653e17be2e0e596d280713d4/.tmp/cf/124612b69e3a4944bf78466738b03511 as hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testExportExpiredSnapshot/a07dd0e3653e17be2e0e596d280713d4/cf/124612b69e3a4944bf78466738b03511 2024-12-02T15:23:52,952 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testExportExpiredSnapshot/9d2126893e3de75703fde3d34993d9c3/.tmp/cf/44235b2da3644daab87e9d318e589588 as hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testExportExpiredSnapshot/9d2126893e3de75703fde3d34993d9c3/cf/44235b2da3644daab87e9d318e589588 2024-12-02T15:23:52,956 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testExportExpiredSnapshot/9d2126893e3de75703fde3d34993d9c3/cf/44235b2da3644daab87e9d318e589588, entries=46, sequenceid=5, filesize=8.1 K 2024-12-02T15:23:52,956 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testExportExpiredSnapshot/a07dd0e3653e17be2e0e596d280713d4/cf/124612b69e3a4944bf78466738b03511, entries=4, sequenceid=5, filesize=5.2 K 2024-12-02T15:23:52,957 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.HRegion(3140): Finished flush of dataSize ~3.00 KB/3070, heapSize ~6.70 KB/6864, currentSize=0 B/0 for 9d2126893e3de75703fde3d34993d9c3 in 38ms, sequenceid=5, compaction requested=false 2024-12-02T15:23:52,957 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testExportExpiredSnapshot' 2024-12-02T15:23:52,957 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-0 
{event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.HRegion(3140): Finished flush of dataSize ~266 B/266, heapSize ~816 B/816, currentSize=0 B/0 for a07dd0e3653e17be2e0e596d280713d4 in 38ms, sequenceid=5, compaction requested=false 2024-12-02T15:23:52,957 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testExportExpiredSnapshot' 2024-12-02T15:23:52,957 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.HRegion(2603): Flush status journal for 9d2126893e3de75703fde3d34993d9c3: 2024-12-02T15:23:52,957 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.HRegion(2603): Flush status journal for a07dd0e3653e17be2e0e596d280713d4: 2024-12-02T15:23:52,957 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.SnapshotRegionCallable(75): Snapshotting region testExportExpiredSnapshot,1,1733153032065.9d2126893e3de75703fde3d34993d9c3. for snapshot-testExportExpiredSnapshot completed. 2024-12-02T15:23:52,957 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.SnapshotRegionCallable(75): Snapshotting region testExportExpiredSnapshot,,1733153032065.a07dd0e3653e17be2e0e596d280713d4. for snapshot-testExportExpiredSnapshot completed. 2024-12-02T15:23:52,957 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] snapshot.SnapshotManifest(241): Storing 'testExportExpiredSnapshot,1,1733153032065.9d2126893e3de75703fde3d34993d9c3.' region-info for snapshot=snapshot-testExportExpiredSnapshot 2024-12-02T15:23:52,957 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-02T15:23:52,957 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] snapshot.SnapshotManifest(241): Storing 'testExportExpiredSnapshot,,1733153032065.a07dd0e3653e17be2e0e596d280713d4.' 
region-info for snapshot=snapshot-testExportExpiredSnapshot 2024-12-02T15:23:52,957 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testExportExpiredSnapshot/9d2126893e3de75703fde3d34993d9c3/cf/44235b2da3644daab87e9d318e589588] hfiles 2024-12-02T15:23:52,957 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-02T15:23:52,957 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testExportExpiredSnapshot/a07dd0e3653e17be2e0e596d280713d4/cf/124612b69e3a4944bf78466738b03511] hfiles 2024-12-02T15:23:52,957 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testExportExpiredSnapshot/9d2126893e3de75703fde3d34993d9c3/cf/44235b2da3644daab87e9d318e589588 for snapshot=snapshot-testExportExpiredSnapshot 2024-12-02T15:23:52,957 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testExportExpiredSnapshot/a07dd0e3653e17be2e0e596d280713d4/cf/124612b69e3a4944bf78466738b03511 for snapshot=snapshot-testExportExpiredSnapshot 2024-12-02T15:23:52,965 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742229_1405 (size=103) 2024-12-02T15:23:52,966 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742229_1405 (size=103) 2024-12-02T15:23:52,966 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742228_1404 (size=103) 2024-12-02T15:23:52,966 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742228_1404 (size=103) 2024-12-02T15:23:52,966 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742229_1405 (size=103) 2024-12-02T15:23:52,966 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742228_1404 (size=103) 2024-12-02T15:23:52,966 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testExportExpiredSnapshot,,1733153032065.a07dd0e3653e17be2e0e596d280713d4. 
2024-12-02T15:23:52,966 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=186 2024-12-02T15:23:52,966 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testExportExpiredSnapshot,1,1733153032065.9d2126893e3de75703fde3d34993d9c3. 2024-12-02T15:23:52,966 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=187 2024-12-02T15:23:52,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] master.HMaster(4169): Remote procedure done, pid=186 2024-12-02T15:23:52,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.HMaster(4169): Remote procedure done, pid=187 2024-12-02T15:23:52,967 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snapshot-testExportExpiredSnapshot on region a07dd0e3653e17be2e0e596d280713d4 2024-12-02T15:23:52,967 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snapshot-testExportExpiredSnapshot on region 9d2126893e3de75703fde3d34993d9c3 2024-12-02T15:23:52,967 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=187, ppid=185, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 9d2126893e3de75703fde3d34993d9c3 2024-12-02T15:23:52,967 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=186, ppid=185, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure a07dd0e3653e17be2e0e596d280713d4 2024-12-02T15:23:52,968 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=187, ppid=185, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 9d2126893e3de75703fde3d34993d9c3 in 202 msec 2024-12-02T15:23:52,969 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=186, resume processing ppid=185 2024-12-02T15:23:52,969 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=186, ppid=185, state=SUCCESS, hasLock=false; SnapshotRegionProcedure a07dd0e3653e17be2e0e596d280713d4 in 202 msec 2024-12-02T15:23:52,969 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-02T15:23:52,970 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-02T15:23:52,970 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-02T15:23:52,970 DEBUG [PEWorker-3 {}] 
snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snapshot-testExportExpiredSnapshot 2024-12-02T15:23:52,971 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/.hbase-snapshot/.tmp/snapshot-testExportExpiredSnapshot 2024-12-02T15:23:52,982 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742230_1406 (size=609) 2024-12-02T15:23:52,983 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742230_1406 (size=609) 2024-12-02T15:23:52,983 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742230_1406 (size=609) 2024-12-02T15:23:52,985 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-02T15:23:52,990 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-02T15:23:52,990 DEBUG [PEWorker-3 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/.hbase-snapshot/.tmp/snapshot-testExportExpiredSnapshot to hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/.hbase-snapshot/snapshot-testExportExpiredSnapshot 2024-12-02T15:23:52,991 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_POST_OPERATION 2024-12-02T15:23:52,992 DEBUG [PEWorker-3 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 }, snapshot procedure id = 185 2024-12-02T15:23:52,993 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=185, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } in 237 msec 2024-12-02T15:23:53,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=185 2024-12-02T15:23:53,073 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testExportExpiredSnapshot completed 2024-12-02T15:23:53,093 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testExportExpiredSnapshot 2024-12-02T15:23:53,093 INFO [HBase-Metrics2-1 {}] 
impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testExportExpiredSnapshot Metrics about Tables on a single HBase RegionServer 2024-12-02T15:23:53,094 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportExpiredSnapshot 2024-12-02T15:23:53,094 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportExpiredSnapshot Metrics about Tables on a single HBase RegionServer 2024-12-02T15:23:53,094 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-02T15:23:53,095 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithMergeRegion 2024-12-02T15:23:54,820 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733152780087_0007_000001 (auth:SIMPLE) from 127.0.0.1:45406 2024-12-02T15:23:54,829 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_126270/yarn-6841854118/MiniMRCluster_126270-localDir-nm-0_0/usercache/jenkins/appcache/application_1733152780087_0007/container_1733152780087_0007_01_000001/launch_container.sh] 2024-12-02T15:23:54,829 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_126270/yarn-6841854118/MiniMRCluster_126270-localDir-nm-0_0/usercache/jenkins/appcache/application_1733152780087_0007/container_1733152780087_0007_01_000001/container_tokens] 2024-12-02T15:23:54,829 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_126270/yarn-6841854118/MiniMRCluster_126270-localDir-nm-0_0/usercache/jenkins/appcache/application_1733152780087_0007/container_1733152780087_0007_01_000001/sysfs] 2024-12-02T15:23:55,529 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-02T15:24:01,777 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
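The FsDatasetAsyncDiskServiceFixer warning above comes from reflective access to a Hadoop-internal field that later Hadoop releases no longer have (see HBASE-27595). A hedged sketch of that defensive-reflection pattern follows; the class and field probed here are placeholders chosen so the fallback path is exercised, not the actual Hadoop internals.

// Minimal sketch of the defensive-reflection pattern behind the
// "NoSuchFieldException: threadGroup" message above. The target class and
// field name are placeholders; the point is that a missing field is treated
// as a benign, version-dependent condition rather than a failure.
import java.lang.reflect.Field;

public class ReflectionProbe {
    static Field findFieldOrNull(Class<?> clazz, String fieldName) {
        try {
            Field f = clazz.getDeclaredField(fieldName);
            f.setAccessible(true);
            return f;
        } catch (NoSuchFieldException e) {
            // Field removed or renamed in this library version; caller degrades gracefully.
            System.out.println("NoSuchFieldException: " + fieldName
                + "; the field may not exist in this version, see the relevant issue tracker.");
            return null;
        }
    }

    public static void main(String[] args) {
        // Current JDKs expose no Thread field literally named "threadGroup",
        // so this call takes the fallback path above.
        Field f = findFieldOrNull(Thread.class, "threadGroup");
        System.out.println(f == null ? "probe skipped" : "probe found " + f.getName());
    }
}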
2024-12-02T15:24:03,083 INFO [Time-limited test {}] snapshot.TestExportSnapshot(515): HDFS export destination path: hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/export-test/export-1733153043083
2024-12-02T15:24:03,083 INFO [Time-limited test {}] snapshot.TestExportSnapshot(542): tgtFsUri=hdfs://localhost:36329, tgtDir=hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/export-test/export-1733153043083, rawTgtDir=hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/export-test/export-1733153043083, srcFsUri=hdfs://localhost:36329, srcDir=hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657
2024-12-02T15:24:03,106 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1094): inputFs=hdfs://localhost:36329, inputRoot=hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657
2024-12-02T15:24:03,106 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1095): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-205930424_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/export-test/export-1733153043083, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/export-test/export-1733153043083/.hbase-snapshot/.tmp/snapshot-testExportExpiredSnapshot
2024-12-02T15:24:03,108 INFO [Time-limited test {}] snapshot.ExportSnapshot(1104): Verify the source snapshot's expiration status and integrity.
2024-12-02T15:24:03,109 ERROR [Time-limited test {}] util.AbstractHBaseTool(152): Error running command-line tool
org.apache.hadoop.hbase.snapshot.SnapshotTTLExpiredException: TTL for snapshot 'snapshot-testExportExpiredSnapshot' has already expired.
	at org.apache.hadoop.hbase.snapshot.ExportSnapshot.verifySnapshot(ExportSnapshot.java:960) ~[classes/:?]
	at org.apache.hadoop.hbase.snapshot.ExportSnapshot.doWork(ExportSnapshot.java:1105) ~[classes/:?]
	at org.apache.hadoop.hbase.util.AbstractHBaseTool.run(AbstractHBaseTool.java:150) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.util.ToolRunner.run(ToolRunner.java:82) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.runExportSnapshot(TestExportSnapshot.java:570) ~[test-classes/:?]
	at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.testExportExpiredSnapshot(TestExportSnapshot.java:362) ~[test-classes/:?]
	at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?]
	at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2]
	at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2]
	at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2]
	at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) ~[junit-4.13.2.jar:4.13.2]
	at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2]
	at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2]
	at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) ~[junit-4.13.2.jar:4.13.2]
	at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2]
	at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2]
	at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2]
	at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2]
	at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2]
	at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2]
	at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2]
	at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2]
	at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2]
	at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2]
	at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2]
	at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2]
	at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2]
	at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2]
	at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
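ExportSnapshot aborts above with SnapshotTTLExpiredException because the snapshot was created with ttl=10 (seconds) and the export ran a little over ten seconds later. The arithmetic is simple enough to show standalone; the sketch below is not HBase's SnapshotDescriptionUtils, and the epoch values in main are only modeled on the log timestamps.

// Standalone sketch of the TTL check the export above fails; this is not
// HBase's own code, just the same arithmetic. A snapshot with ttl <= 0 never
// expires; otherwise it expires once creationTime + ttl seconds lies in the past.
public class SnapshotTtlCheck {
    static boolean isExpired(long creationTimeMillis, long ttlSeconds, long nowMillis) {
        if (ttlSeconds <= 0) {
            return false; // no TTL configured
        }
        return nowMillis > creationTimeMillis + ttlSeconds * 1000L;
    }

    public static void main(String[] args) {
        // Values modeled on the log: the snapshot procedure finished around
        // 15:23:52,993 with ttl=10, and the export verification ran at 15:24:03,108.
        long created = 1733153032993L;          // hypothetical creation time (epoch millis)
        long exportAttempt = created + 10_115L; // ~10.1 s later
        System.out.println("expired? " + isExpired(created, 10, exportAttempt)); // prints true
    }
}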
2024-12-02T15:24:03,110 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testExportExpiredSnapshot 2024-12-02T15:24:03,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] procedure2.ProcedureExecutor(1139): Stored pid=188, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportExpiredSnapshot 2024-12-02T15:24:03,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=188 2024-12-02T15:24:03,113 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733153043113"}]},"ts":"1733153043113"} 2024-12-02T15:24:03,114 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportExpiredSnapshot, state=DISABLING in hbase:meta 2024-12-02T15:24:03,114 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(284): Set testtb-testExportExpiredSnapshot to state=DISABLING 2024-12-02T15:24:03,115 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=189, ppid=188, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportExpiredSnapshot}] 2024-12-02T15:24:03,116 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=190, ppid=189, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=c5999f0e0ea17043a3e07c6712f1e8f0, UNASSIGN}, {pid=191, ppid=189, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=87e205a09dc0f6af36a7f0bac5bd0ba3, UNASSIGN}] 2024-12-02T15:24:03,117 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=191, ppid=189, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=87e205a09dc0f6af36a7f0bac5bd0ba3, UNASSIGN 2024-12-02T15:24:03,117 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=190, ppid=189, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=c5999f0e0ea17043a3e07c6712f1e8f0, UNASSIGN 2024-12-02T15:24:03,117 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=191 updating hbase:meta row=87e205a09dc0f6af36a7f0bac5bd0ba3, regionState=CLOSING, regionLocation=be0458f36726,43025,1733152773677 2024-12-02T15:24:03,118 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=190 updating hbase:meta row=c5999f0e0ea17043a3e07c6712f1e8f0, regionState=CLOSING, regionLocation=be0458f36726,35155,1733152773848 2024-12-02T15:24:03,119 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=191, ppid=189, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=87e205a09dc0f6af36a7f0bac5bd0ba3, UNASSIGN because future has completed 2024-12-02T15:24:03,119 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-02T15:24:03,119 INFO [PEWorker-3 {}] 
procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=192, ppid=191, state=RUNNABLE, hasLock=false; CloseRegionProcedure 87e205a09dc0f6af36a7f0bac5bd0ba3, server=be0458f36726,43025,1733152773677}] 2024-12-02T15:24:03,120 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=190, ppid=189, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=c5999f0e0ea17043a3e07c6712f1e8f0, UNASSIGN because future has completed 2024-12-02T15:24:03,120 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-02T15:24:03,120 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=193, ppid=190, state=RUNNABLE, hasLock=false; CloseRegionProcedure c5999f0e0ea17043a3e07c6712f1e8f0, server=be0458f36726,35155,1733152773848}] 2024-12-02T15:24:03,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=188 2024-12-02T15:24:03,271 INFO [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=193}] handler.UnassignRegionHandler(122): Close c5999f0e0ea17043a3e07c6712f1e8f0 2024-12-02T15:24:03,271 INFO [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] handler.UnassignRegionHandler(122): Close 87e205a09dc0f6af36a7f0bac5bd0ba3 2024-12-02T15:24:03,271 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-02T15:24:03,271 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=193}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-02T15:24:03,271 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] regionserver.HRegion(1722): Closing 87e205a09dc0f6af36a7f0bac5bd0ba3, disabling compactions & flushes 2024-12-02T15:24:03,271 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=193}] regionserver.HRegion(1722): Closing c5999f0e0ea17043a3e07c6712f1e8f0, disabling compactions & flushes 2024-12-02T15:24:03,272 INFO [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] regionserver.HRegion(1755): Closing region testtb-testExportExpiredSnapshot,1,1733153030710.87e205a09dc0f6af36a7f0bac5bd0ba3. 2024-12-02T15:24:03,272 INFO [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=193}] regionserver.HRegion(1755): Closing region testtb-testExportExpiredSnapshot,,1733153030710.c5999f0e0ea17043a3e07c6712f1e8f0. 2024-12-02T15:24:03,272 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=193}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportExpiredSnapshot,,1733153030710.c5999f0e0ea17043a3e07c6712f1e8f0. 2024-12-02T15:24:03,272 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportExpiredSnapshot,1,1733153030710.87e205a09dc0f6af36a7f0bac5bd0ba3. 
2024-12-02T15:24:03,272 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportExpiredSnapshot,1,1733153030710.87e205a09dc0f6af36a7f0bac5bd0ba3. after waiting 0 ms 2024-12-02T15:24:03,272 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=193}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportExpiredSnapshot,,1733153030710.c5999f0e0ea17043a3e07c6712f1e8f0. after waiting 0 ms 2024-12-02T15:24:03,272 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportExpiredSnapshot,1,1733153030710.87e205a09dc0f6af36a7f0bac5bd0ba3. 2024-12-02T15:24:03,272 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=193}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportExpiredSnapshot,,1733153030710.c5999f0e0ea17043a3e07c6712f1e8f0. 2024-12-02T15:24:03,275 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportExpiredSnapshot/87e205a09dc0f6af36a7f0bac5bd0ba3/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-02T15:24:03,275 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=193}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportExpiredSnapshot/c5999f0e0ea17043a3e07c6712f1e8f0/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-02T15:24:03,276 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-02T15:24:03,276 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=193}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-02T15:24:03,276 INFO [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] regionserver.HRegion(1973): Closed testtb-testExportExpiredSnapshot,1,1733153030710.87e205a09dc0f6af36a7f0bac5bd0ba3. 2024-12-02T15:24:03,276 INFO [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=193}] regionserver.HRegion(1973): Closed testtb-testExportExpiredSnapshot,,1733153030710.c5999f0e0ea17043a3e07c6712f1e8f0. 
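The close sequence above (disable compactions and flushes, take the close lock, disable writes, write the recovered.edits seqid marker, run post-close hooks) is summarized right afterwards in a per-step "close journal" with millisecond deltas. A minimal stand-in for that journal bookkeeping follows; it is not HRegion's implementation, just the same record-keeping idea.

// Illustrative sketch of a per-step close journal (step name + timestamp + delta
// from the previous step), in the style of the region close journals nearby.
import java.util.ArrayList;
import java.util.List;

public class CloseJournal {
    private final List<String> entries = new ArrayList<>();
    private long lastTs = -1;

    void record(String step, long tsMillis) {
        String delta = lastTs < 0 ? "" : " (+" + (tsMillis - lastTs) + " ms)";
        entries.add(step + " at " + tsMillis + delta);
        lastTs = tsMillis;
    }

    @Override
    public String toString() {
        return String.join(", ", entries);
    }

    public static void main(String[] args) {
        CloseJournal j = new CloseJournal();
        long t = 1733153043271L; // timestamps modeled on the log above
        j.record("Waiting for close lock", t);
        j.record("Disabling compacts and flushes for region", t);
        j.record("Disabling writes for close", t + 1);
        j.record("Writing region close event to WAL", t + 2);
        j.record("Closed", t + 5);
        System.out.println(j);
    }
}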
2024-12-02T15:24:03,276 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] regionserver.HRegion(1676): Region close journal for 87e205a09dc0f6af36a7f0bac5bd0ba3: Waiting for close lock at 1733153043271Running coprocessor pre-close hooks at 1733153043271Disabling compacts and flushes for region at 1733153043271Disabling writes for close at 1733153043272 (+1 ms)Writing region close event to WAL at 1733153043273 (+1 ms)Running coprocessor post-close hooks at 1733153043276 (+3 ms)Closed at 1733153043276 2024-12-02T15:24:03,276 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=193}] regionserver.HRegion(1676): Region close journal for c5999f0e0ea17043a3e07c6712f1e8f0: Waiting for close lock at 1733153043271Running coprocessor pre-close hooks at 1733153043271Disabling compacts and flushes for region at 1733153043271Disabling writes for close at 1733153043272 (+1 ms)Writing region close event to WAL at 1733153043272Running coprocessor post-close hooks at 1733153043276 (+4 ms)Closed at 1733153043276 2024-12-02T15:24:03,278 INFO [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=192}] handler.UnassignRegionHandler(157): Closed 87e205a09dc0f6af36a7f0bac5bd0ba3 2024-12-02T15:24:03,278 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=191 updating hbase:meta row=87e205a09dc0f6af36a7f0bac5bd0ba3, regionState=CLOSED 2024-12-02T15:24:03,278 INFO [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=193}] handler.UnassignRegionHandler(157): Closed c5999f0e0ea17043a3e07c6712f1e8f0 2024-12-02T15:24:03,279 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=190 updating hbase:meta row=c5999f0e0ea17043a3e07c6712f1e8f0, regionState=CLOSED 2024-12-02T15:24:03,280 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=192, ppid=191, state=RUNNABLE, hasLock=false; CloseRegionProcedure 87e205a09dc0f6af36a7f0bac5bd0ba3, server=be0458f36726,43025,1733152773677 because future has completed 2024-12-02T15:24:03,280 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=193, ppid=190, state=RUNNABLE, hasLock=false; CloseRegionProcedure c5999f0e0ea17043a3e07c6712f1e8f0, server=be0458f36726,35155,1733152773848 because future has completed 2024-12-02T15:24:03,287 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=192, resume processing ppid=191 2024-12-02T15:24:03,288 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=192, ppid=191, state=SUCCESS, hasLock=false; CloseRegionProcedure 87e205a09dc0f6af36a7f0bac5bd0ba3, server=be0458f36726,43025,1733152773677 in 166 msec 2024-12-02T15:24:03,288 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=193, resume processing ppid=190 2024-12-02T15:24:03,288 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=193, ppid=190, state=SUCCESS, hasLock=false; CloseRegionProcedure c5999f0e0ea17043a3e07c6712f1e8f0, server=be0458f36726,35155,1733152773848 in 166 msec 2024-12-02T15:24:03,288 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=191, ppid=189, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=87e205a09dc0f6af36a7f0bac5bd0ba3, UNASSIGN in 171 msec 2024-12-02T15:24:03,289 INFO [PEWorker-2 {}] 
procedure2.ProcedureExecutor(2017): Finished subprocedure pid=190, resume processing ppid=189 2024-12-02T15:24:03,289 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=190, ppid=189, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=c5999f0e0ea17043a3e07c6712f1e8f0, UNASSIGN in 172 msec 2024-12-02T15:24:03,291 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=189, resume processing ppid=188 2024-12-02T15:24:03,291 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=189, ppid=188, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportExpiredSnapshot in 175 msec 2024-12-02T15:24:03,292 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733153043292"}]},"ts":"1733153043292"} 2024-12-02T15:24:03,293 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportExpiredSnapshot, state=DISABLED in hbase:meta 2024-12-02T15:24:03,293 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(296): Set testtb-testExportExpiredSnapshot to state=DISABLED 2024-12-02T15:24:03,295 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=188, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportExpiredSnapshot in 184 msec 2024-12-02T15:24:03,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=188 2024-12-02T15:24:03,433 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testExportExpiredSnapshot completed 2024-12-02T15:24:03,433 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testExportExpiredSnapshot 2024-12-02T15:24:03,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] procedure2.ProcedureExecutor(1139): Stored pid=194, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportExpiredSnapshot 2024-12-02T15:24:03,434 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=194, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportExpiredSnapshot 2024-12-02T15:24:03,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportExpiredSnapshot 2024-12-02T15:24:03,435 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=194, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportExpiredSnapshot 2024-12-02T15:24:03,437 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43025 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportExpiredSnapshot 2024-12-02T15:24:03,438 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportExpiredSnapshot/c5999f0e0ea17043a3e07c6712f1e8f0 2024-12-02T15:24:03,438 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(131): ARCHIVING 
hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportExpiredSnapshot/87e205a09dc0f6af36a7f0bac5bd0ba3 2024-12-02T15:24:03,439 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportExpiredSnapshot/c5999f0e0ea17043a3e07c6712f1e8f0/cf, FileablePath, hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportExpiredSnapshot/c5999f0e0ea17043a3e07c6712f1e8f0/recovered.edits] 2024-12-02T15:24:03,440 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportExpiredSnapshot/87e205a09dc0f6af36a7f0bac5bd0ba3/cf, FileablePath, hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportExpiredSnapshot/87e205a09dc0f6af36a7f0bac5bd0ba3/recovered.edits] 2024-12-02T15:24:03,498 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportExpiredSnapshot/c5999f0e0ea17043a3e07c6712f1e8f0/cf/26a535b33fa54481a865ddcc4c148bce to hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/archive/data/default/testtb-testExportExpiredSnapshot/c5999f0e0ea17043a3e07c6712f1e8f0/cf/26a535b33fa54481a865ddcc4c148bce 2024-12-02T15:24:03,499 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportExpiredSnapshot/87e205a09dc0f6af36a7f0bac5bd0ba3/cf/1fe2ab85a78f4c9eab56043b1c2aeafa to hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/archive/data/default/testtb-testExportExpiredSnapshot/87e205a09dc0f6af36a7f0bac5bd0ba3/cf/1fe2ab85a78f4c9eab56043b1c2aeafa 2024-12-02T15:24:03,500 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportExpiredSnapshot/c5999f0e0ea17043a3e07c6712f1e8f0/recovered.edits/9.seqid to hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/archive/data/default/testtb-testExportExpiredSnapshot/c5999f0e0ea17043a3e07c6712f1e8f0/recovered.edits/9.seqid 2024-12-02T15:24:03,500 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportExpiredSnapshot/c5999f0e0ea17043a3e07c6712f1e8f0 2024-12-02T15:24:03,501 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportExpiredSnapshot/87e205a09dc0f6af36a7f0bac5bd0ba3/recovered.edits/9.seqid to hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/archive/data/default/testtb-testExportExpiredSnapshot/87e205a09dc0f6af36a7f0bac5bd0ba3/recovered.edits/9.seqid 2024-12-02T15:24:03,501 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(610): Deleted 
hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportExpiredSnapshot/87e205a09dc0f6af36a7f0bac5bd0ba3 2024-12-02T15:24:03,501 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportExpiredSnapshot regions 2024-12-02T15:24:03,565 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=194, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportExpiredSnapshot 2024-12-02T15:24:03,567 WARN [PEWorker-5 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testExportExpiredSnapshot from hbase:meta 2024-12-02T15:24:03,569 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testExportExpiredSnapshot' descriptor. 2024-12-02T15:24:03,571 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=194, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportExpiredSnapshot 2024-12-02T15:24:03,571 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testExportExpiredSnapshot' from region states. 2024-12-02T15:24:03,571 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot,,1733153030710.c5999f0e0ea17043a3e07c6712f1e8f0.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733153043571"}]},"ts":"9223372036854775807"} 2024-12-02T15:24:03,571 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot,1,1733153030710.87e205a09dc0f6af36a7f0bac5bd0ba3.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733153043571"}]},"ts":"9223372036854775807"} 2024-12-02T15:24:03,573 INFO [PEWorker-5 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-12-02T15:24:03,573 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => c5999f0e0ea17043a3e07c6712f1e8f0, NAME => 'testtb-testExportExpiredSnapshot,,1733153030710.c5999f0e0ea17043a3e07c6712f1e8f0.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 87e205a09dc0f6af36a7f0bac5bd0ba3, NAME => 'testtb-testExportExpiredSnapshot,1,1733153030710.87e205a09dc0f6af36a7f0bac5bd0ba3.', STARTKEY => '1', ENDKEY => ''}] 2024-12-02T15:24:03,573 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testExportExpiredSnapshot' as deleted. 
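The HFileArchiver lines above show that dropping the table does not delete store files outright: each file is moved from its location under data/ to the mirrored location under archive/data/, and only the emptied region directory is then removed. A hedged sketch of that path re-rooting follows; the roots and file names are shortened placeholders for the HDFS paths in the log, and the code is illustrative, not HBase's HFileArchiver.

// Re-root a store file that lives under dataRoot to the same relative
// location under archiveRoot, mirroring the data/ -> archive/data/ moves above.
import java.nio.file.Path;
import java.nio.file.Paths;

public class ArchivePathSketch {
    static Path toArchiveLocation(Path dataRoot, Path archiveRoot, Path storeFile) {
        Path relative = dataRoot.relativize(storeFile);
        return archiveRoot.resolve(relative);
    }

    public static void main(String[] args) {
        Path dataRoot = Paths.get("/hbase/data");            // placeholder roots
        Path archiveRoot = Paths.get("/hbase/archive/data");
        Path storeFile = Paths.get("/hbase/data/default/testtb-testExportExpiredSnapshot/"
            + "c5999f0e0ea17043a3e07c6712f1e8f0/cf/26a535b33fa54481a865ddcc4c148bce");
        System.out.println(toArchiveLocation(dataRoot, archiveRoot, storeFile));
        // -> /hbase/archive/data/default/testtb-testExportExpiredSnapshot/c5999.../cf/26a5...
    }
}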
2024-12-02T15:24:03,573 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733153043573"}]},"ts":"9223372036854775807"} 2024-12-02T15:24:03,574 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportExpiredSnapshot state from META 2024-12-02T15:24:03,575 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(133): Finished pid=194, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportExpiredSnapshot 2024-12-02T15:24:03,576 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=194, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportExpiredSnapshot in 142 msec 2024-12-02T15:24:03,597 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43025-0x10197ce54a90001, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-12-02T15:24:03,597 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34441-0x10197ce54a90000, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-12-02T15:24:03,597 DEBUG [pool-69-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45405-0x10197ce54a90002, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-12-02T15:24:03,597 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35155-0x10197ce54a90003, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-12-02T15:24:03,597 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF 2024-12-02T15:24:03,598 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF 2024-12-02T15:24:03,598 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF 2024-12-02T15:24:03,598 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF 2024-12-02T15:24:03,622 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35155-0x10197ce54a90003, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-12-02T15:24:03,622 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34441-0x10197ce54a90000, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-12-02T15:24:03,622 DEBUG [pool-69-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45405-0x10197ce54a90002, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-12-02T15:24:03,622 DEBUG 
[Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35155-0x10197ce54a90003, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T15:24:03,622 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34441-0x10197ce54a90000, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T15:24:03,622 DEBUG [pool-69-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45405-0x10197ce54a90002, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T15:24:03,622 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43025-0x10197ce54a90001, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-12-02T15:24:03,622 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43025-0x10197ce54a90001, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T15:24:03,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=194 2024-12-02T15:24:03,623 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-02T15:24:03,623 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-02T15:24:03,623 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-02T15:24:03,623 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-02T15:24:03,624 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportExpiredSnapshot 2024-12-02T15:24:03,624 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportExpiredSnapshot completed 2024-12-02T15:24:03,631 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportExpiredSnapshot" type: DISABLED 2024-12-02T15:24:03,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testExportExpiredSnapshot 2024-12-02T15:24:03,634 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snapshot-testExportExpiredSnapshot" type: 
DISABLED 2024-12-02T15:24:03,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] snapshot.SnapshotManager(381): Deleting snapshot: snapshot-testExportExpiredSnapshot 2024-12-02T15:24:03,637 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportExpiredSnapshot" type: DISABLED 2024-12-02T15:24:03,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportExpiredSnapshot 2024-12-02T15:24:03,660 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestSecureExportSnapshot#testExportExpiredSnapshot Thread=811 (was 818), OpenFileDescriptor=795 (was 826), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=583 (was 732), ProcessCount=13 (was 19), AvailableMemoryMB=1981 (was 1271) - AvailableMemoryMB LEAK? - 2024-12-02T15:24:03,660 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=811 is superior to 500 2024-12-02T15:24:03,675 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestSecureExportSnapshot#testEmptyExportFileSystemState Thread=811, OpenFileDescriptor=795, MaxFileDescriptor=1048576, SystemLoadAverage=583, ProcessCount=13, AvailableMemoryMB=1981 2024-12-02T15:24:03,675 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=811 is superior to 500 2024-12-02T15:24:03,676 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testEmptyExportFileSystemState', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-02T15:24:03,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] procedure2.ProcedureExecutor(1139): Stored pid=195, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testEmptyExportFileSystemState 2024-12-02T15:24:03,678 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=195, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testEmptyExportFileSystemState execute state=CREATE_TABLE_PRE_OPERATION 2024-12-02T15:24:03,678 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T15:24:03,678 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testEmptyExportFileSystemState" procId is: 195 2024-12-02T15:24:03,678 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=195, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testEmptyExportFileSystemState execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-02T15:24:03,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=195 2024-12-02T15:24:03,684 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is 
added to blk_1073742231_1407 (size=412) 2024-12-02T15:24:03,685 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742231_1407 (size=412) 2024-12-02T15:24:03,685 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742231_1407 (size=412) 2024-12-02T15:24:03,686 INFO [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => 457a10ed42c91b616b1b4fe3f1b21e1b, NAME => 'testtb-testEmptyExportFileSystemState,1,1733153043676.457a10ed42c91b616b1b4fe3f1b21e1b.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testEmptyExportFileSystemState', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657 2024-12-02T15:24:03,686 INFO [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => efe68bf3b39c1068cde553fb241e761d, NAME => 'testtb-testEmptyExportFileSystemState,,1733153043676.efe68bf3b39c1068cde553fb241e761d.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testEmptyExportFileSystemState', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657 2024-12-02T15:24:03,695 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742233_1409 (size=73) 2024-12-02T15:24:03,695 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742233_1409 (size=73) 2024-12-02T15:24:03,695 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742233_1409 (size=73) 2024-12-02T15:24:03,696 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testEmptyExportFileSystemState,1,1733153043676.457a10ed42c91b616b1b4fe3f1b21e1b.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T15:24:03,696 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1722): Closing 457a10ed42c91b616b1b4fe3f1b21e1b, disabling compactions & flushes 2024-12-02T15:24:03,696 INFO [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testEmptyExportFileSystemState,1,1733153043676.457a10ed42c91b616b1b4fe3f1b21e1b. 
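The create request above carries the full table descriptor (a single 'cf' family with VERSIONS => '1', BLOOMFILTER => 'ROW', 64 KB blocks) and produces two regions split at rowkey '1'. Roughly the same request expressed through the public HBase Admin API is sketched below; the connection setup and the exact builder calls chosen are assumptions for illustration and only mirror what the log reports.

// Hedged client-side sketch of the create request seen above, using the
// public HBase Admin API. Table and family names are copied from the log.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateTableSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create(); // assumes hbase-site.xml on the classpath
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            TableDescriptor desc = TableDescriptorBuilder
                .newBuilder(TableName.valueOf("testtb-testEmptyExportFileSystemState"))
                .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
                    .setMaxVersions(1)                  // VERSIONS => '1'
                    .setBloomFilterType(BloomType.ROW)  // BLOOMFILTER => 'ROW'
                    .setBlocksize(65536)                // BLOCKSIZE => 64 KB
                    .build())
                .build();
            // One split key ('1') yields the two regions seen in the log:
            // STARTKEY '' / ENDKEY '1' and STARTKEY '1' / ENDKEY ''.
            admin.createTable(desc, new byte[][] { Bytes.toBytes("1") });
        }
    }
}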
2024-12-02T15:24:03,696 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testEmptyExportFileSystemState,1,1733153043676.457a10ed42c91b616b1b4fe3f1b21e1b. 2024-12-02T15:24:03,696 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testEmptyExportFileSystemState,1,1733153043676.457a10ed42c91b616b1b4fe3f1b21e1b. after waiting 0 ms 2024-12-02T15:24:03,696 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testEmptyExportFileSystemState,1,1733153043676.457a10ed42c91b616b1b4fe3f1b21e1b. 2024-12-02T15:24:03,696 INFO [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testEmptyExportFileSystemState,1,1733153043676.457a10ed42c91b616b1b4fe3f1b21e1b. 2024-12-02T15:24:03,696 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1676): Region close journal for 457a10ed42c91b616b1b4fe3f1b21e1b: Waiting for close lock at 1733153043696Disabling compacts and flushes for region at 1733153043696Disabling writes for close at 1733153043696Writing region close event to WAL at 1733153043696Closed at 1733153043696 2024-12-02T15:24:03,699 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742232_1408 (size=73) 2024-12-02T15:24:03,700 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742232_1408 (size=73) 2024-12-02T15:24:03,700 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742232_1408 (size=73) 2024-12-02T15:24:03,700 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testEmptyExportFileSystemState,,1733153043676.efe68bf3b39c1068cde553fb241e761d.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T15:24:03,701 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1722): Closing efe68bf3b39c1068cde553fb241e761d, disabling compactions & flushes 2024-12-02T15:24:03,701 INFO [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testEmptyExportFileSystemState,,1733153043676.efe68bf3b39c1068cde553fb241e761d. 2024-12-02T15:24:03,701 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testEmptyExportFileSystemState,,1733153043676.efe68bf3b39c1068cde553fb241e761d. 2024-12-02T15:24:03,701 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testEmptyExportFileSystemState,,1733153043676.efe68bf3b39c1068cde553fb241e761d. after waiting 0 ms 2024-12-02T15:24:03,701 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testEmptyExportFileSystemState,,1733153043676.efe68bf3b39c1068cde553fb241e761d. 
2024-12-02T15:24:03,701 INFO [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testEmptyExportFileSystemState,,1733153043676.efe68bf3b39c1068cde553fb241e761d. 2024-12-02T15:24:03,701 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1676): Region close journal for efe68bf3b39c1068cde553fb241e761d: Waiting for close lock at 1733153043701Disabling compacts and flushes for region at 1733153043701Disabling writes for close at 1733153043701Writing region close event to WAL at 1733153043701Closed at 1733153043701 2024-12-02T15:24:03,702 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=195, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testEmptyExportFileSystemState execute state=CREATE_TABLE_ADD_TO_META 2024-12-02T15:24:03,702 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testEmptyExportFileSystemState,1,1733153043676.457a10ed42c91b616b1b4fe3f1b21e1b.","families":{"info":[{"qualifier":"regioninfo","vlen":72,"tag":[],"timestamp":"1733153043702"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733153043702"}]},"ts":"1733153043702"} 2024-12-02T15:24:03,702 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testEmptyExportFileSystemState,,1733153043676.efe68bf3b39c1068cde553fb241e761d.","families":{"info":[{"qualifier":"regioninfo","vlen":72,"tag":[],"timestamp":"1733153043702"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733153043702"}]},"ts":"1733153043702"} 2024-12-02T15:24:03,704 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-12-02T15:24:03,705 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=195, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testEmptyExportFileSystemState execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-02T15:24:03,705 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733153043705"}]},"ts":"1733153043705"} 2024-12-02T15:24:03,706 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testEmptyExportFileSystemState, state=ENABLING in hbase:meta 2024-12-02T15:24:03,706 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(204): Hosts are {be0458f36726=0} racks are {/default-rack=0} 2024-12-02T15:24:03,708 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-02T15:24:03,708 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-02T15:24:03,708 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-02T15:24:03,708 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-02T15:24:03,708 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-02T15:24:03,708 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-02T15:24:03,708 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-02T15:24:03,708 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-02T15:24:03,708 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-02T15:24:03,708 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(326): 
Number of tables=1, number of hosts=1, number of racks=1 2024-12-02T15:24:03,708 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=196, ppid=195, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=efe68bf3b39c1068cde553fb241e761d, ASSIGN}, {pid=197, ppid=195, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=457a10ed42c91b616b1b4fe3f1b21e1b, ASSIGN}] 2024-12-02T15:24:03,709 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=197, ppid=195, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=457a10ed42c91b616b1b4fe3f1b21e1b, ASSIGN 2024-12-02T15:24:03,709 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=196, ppid=195, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=efe68bf3b39c1068cde553fb241e761d, ASSIGN 2024-12-02T15:24:03,710 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=196, ppid=195, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=efe68bf3b39c1068cde553fb241e761d, ASSIGN; state=OFFLINE, location=be0458f36726,35155,1733152773848; forceNewPlan=false, retain=false 2024-12-02T15:24:03,710 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=197, ppid=195, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=457a10ed42c91b616b1b4fe3f1b21e1b, ASSIGN; state=OFFLINE, location=be0458f36726,43025,1733152773677; forceNewPlan=false, retain=false 2024-12-02T15:24:03,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=195 2024-12-02T15:24:03,860 INFO [be0458f36726:34441 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
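The balancer lines above count servers, hosts and racks and then place the two new regions on different region servers. A toy round-robin assignment in the same spirit is sketched below; it is not the BaseLoadBalancer logic (which also weighs locality and randomization), and the server names are placeholders.

// Toy round-robin region-to-server assignment, illustrating the shape of an
// assignment plan like the one produced above (3 servers, 2 new regions).
import java.util.ArrayList;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

public class RoundRobinAssignSketch {
    static Map<String, String> assign(List<String> regions, List<String> servers) {
        Map<String, String> plan = new LinkedHashMap<>();
        for (int i = 0; i < regions.size(); i++) {
            plan.put(regions.get(i), servers.get(i % servers.size()));
        }
        return plan;
    }

    public static void main(String[] args) {
        List<String> servers = new ArrayList<>(List.of(
            "server-0,43025", "server-1,45405", "server-2,35155")); // placeholder names
        List<String> regions = List.of("efe68bf3b39c1068cde553fb241e761d",
                                       "457a10ed42c91b616b1b4fe3f1b21e1b");
        assign(regions, servers).forEach((r, s) ->
            System.out.println("region " + r + " -> " + s));
    }
}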
2024-12-02T15:24:03,860 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=197 updating hbase:meta row=457a10ed42c91b616b1b4fe3f1b21e1b, regionState=OPENING, regionLocation=be0458f36726,43025,1733152773677 2024-12-02T15:24:03,861 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=196 updating hbase:meta row=efe68bf3b39c1068cde553fb241e761d, regionState=OPENING, regionLocation=be0458f36726,35155,1733152773848 2024-12-02T15:24:03,862 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=197, ppid=195, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=457a10ed42c91b616b1b4fe3f1b21e1b, ASSIGN because future has completed 2024-12-02T15:24:03,863 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=198, ppid=197, state=RUNNABLE, hasLock=false; OpenRegionProcedure 457a10ed42c91b616b1b4fe3f1b21e1b, server=be0458f36726,43025,1733152773677}] 2024-12-02T15:24:03,864 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=196, ppid=195, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=efe68bf3b39c1068cde553fb241e761d, ASSIGN because future has completed 2024-12-02T15:24:03,864 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=199, ppid=196, state=RUNNABLE, hasLock=false; OpenRegionProcedure efe68bf3b39c1068cde553fb241e761d, server=be0458f36726,35155,1733152773848}] 2024-12-02T15:24:03,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=195 2024-12-02T15:24:04,017 INFO [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] handler.AssignRegionHandler(132): Open testtb-testEmptyExportFileSystemState,1,1733153043676.457a10ed42c91b616b1b4fe3f1b21e1b. 2024-12-02T15:24:04,017 INFO [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] handler.AssignRegionHandler(132): Open testtb-testEmptyExportFileSystemState,,1733153043676.efe68bf3b39c1068cde553fb241e761d. 2024-12-02T15:24:04,018 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(7752): Opening region: {ENCODED => 457a10ed42c91b616b1b4fe3f1b21e1b, NAME => 'testtb-testEmptyExportFileSystemState,1,1733153043676.457a10ed42c91b616b1b4fe3f1b21e1b.', STARTKEY => '1', ENDKEY => ''} 2024-12-02T15:24:04,018 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(7752): Opening region: {ENCODED => efe68bf3b39c1068cde553fb241e761d, NAME => 'testtb-testEmptyExportFileSystemState,,1733153043676.efe68bf3b39c1068cde553fb241e761d.', STARTKEY => '', ENDKEY => '1'} 2024-12-02T15:24:04,018 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testEmptyExportFileSystemState,1,1733153043676.457a10ed42c91b616b1b4fe3f1b21e1b. 
service=AccessControlService 2024-12-02T15:24:04,018 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testEmptyExportFileSystemState,,1733153043676.efe68bf3b39c1068cde553fb241e761d. service=AccessControlService 2024-12-02T15:24:04,018 INFO [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-02T15:24:04,018 INFO [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-02T15:24:04,018 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testEmptyExportFileSystemState 457a10ed42c91b616b1b4fe3f1b21e1b 2024-12-02T15:24:04,018 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testEmptyExportFileSystemState efe68bf3b39c1068cde553fb241e761d 2024-12-02T15:24:04,018 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(898): Instantiated testtb-testEmptyExportFileSystemState,1,1733153043676.457a10ed42c91b616b1b4fe3f1b21e1b.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T15:24:04,018 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(898): Instantiated testtb-testEmptyExportFileSystemState,,1733153043676.efe68bf3b39c1068cde553fb241e761d.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T15:24:04,018 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(7794): checking encryption for 457a10ed42c91b616b1b4fe3f1b21e1b 2024-12-02T15:24:04,018 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(7794): checking encryption for efe68bf3b39c1068cde553fb241e761d 2024-12-02T15:24:04,018 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(7797): checking classloading for 457a10ed42c91b616b1b4fe3f1b21e1b 2024-12-02T15:24:04,018 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(7797): checking classloading for efe68bf3b39c1068cde553fb241e761d 2024-12-02T15:24:04,019 INFO [StoreOpener-457a10ed42c91b616b1b4fe3f1b21e1b-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 457a10ed42c91b616b1b4fe3f1b21e1b 2024-12-02T15:24:04,019 INFO [StoreOpener-efe68bf3b39c1068cde553fb241e761d-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region efe68bf3b39c1068cde553fb241e761d 2024-12-02T15:24:04,021 INFO [StoreOpener-457a10ed42c91b616b1b4fe3f1b21e1b-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 457a10ed42c91b616b1b4fe3f1b21e1b columnFamilyName cf 2024-12-02T15:24:04,021 DEBUG [StoreOpener-457a10ed42c91b616b1b4fe3f1b21e1b-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T15:24:04,021 INFO [StoreOpener-457a10ed42c91b616b1b4fe3f1b21e1b-1 {}] regionserver.HStore(327): Store=457a10ed42c91b616b1b4fe3f1b21e1b/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T15:24:04,021 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(1038): replaying wal for 457a10ed42c91b616b1b4fe3f1b21e1b 2024-12-02T15:24:04,022 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testEmptyExportFileSystemState/457a10ed42c91b616b1b4fe3f1b21e1b 2024-12-02T15:24:04,022 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testEmptyExportFileSystemState/457a10ed42c91b616b1b4fe3f1b21e1b 2024-12-02T15:24:04,022 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(1048): stopping wal replay for 457a10ed42c91b616b1b4fe3f1b21e1b 2024-12-02T15:24:04,022 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(1060): Cleaning up temporary data for 457a10ed42c91b616b1b4fe3f1b21e1b 2024-12-02T15:24:04,023 INFO [StoreOpener-efe68bf3b39c1068cde553fb241e761d-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region efe68bf3b39c1068cde553fb241e761d columnFamilyName cf 
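The compactions.CompactionConfiguration records above print the store's effective compaction settings (minFilesToCompact:3, maxFilesToCompact:10, ratio 1.2, off-peak ratio 5.0, major period 604800000 with 0.5 jitter). A hedged sketch of the standard configuration keys that back those printed values; the values in the log appear to be the HBase defaults, and the mapping of each value to a key plus the class name are assumptions for illustration.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionSettings {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Keys assumed to correspond to the values logged by CompactionConfiguration.
    System.out.println("min files    = " + conf.getInt("hbase.hstore.compaction.min", 3));
    System.out.println("max files    = " + conf.getInt("hbase.hstore.compaction.max", 10));
    System.out.println("ratio        = " + conf.getFloat("hbase.hstore.compaction.ratio", 1.2F));
    System.out.println("off-peak     = " + conf.getFloat("hbase.hstore.compaction.ratio.offpeak", 5.0F));
    System.out.println("min size     = " + conf.getLong("hbase.hstore.compaction.min.size", 134217728L));
    System.out.println("major period = " + conf.getLong("hbase.hregion.majorcompaction", 604800000L));
    System.out.println("major jitter = " + conf.getFloat("hbase.hregion.majorcompaction.jitter", 0.5F));
  }
}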
2024-12-02T15:24:04,023 DEBUG [StoreOpener-efe68bf3b39c1068cde553fb241e761d-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T15:24:04,024 INFO [StoreOpener-efe68bf3b39c1068cde553fb241e761d-1 {}] regionserver.HStore(327): Store=efe68bf3b39c1068cde553fb241e761d/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T15:24:04,024 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(1038): replaying wal for efe68bf3b39c1068cde553fb241e761d 2024-12-02T15:24:04,024 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(1093): writing seq id for 457a10ed42c91b616b1b4fe3f1b21e1b 2024-12-02T15:24:04,024 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testEmptyExportFileSystemState/efe68bf3b39c1068cde553fb241e761d 2024-12-02T15:24:04,025 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testEmptyExportFileSystemState/efe68bf3b39c1068cde553fb241e761d 2024-12-02T15:24:04,025 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(1048): stopping wal replay for efe68bf3b39c1068cde553fb241e761d 2024-12-02T15:24:04,025 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(1060): Cleaning up temporary data for efe68bf3b39c1068cde553fb241e761d 2024-12-02T15:24:04,026 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testEmptyExportFileSystemState/457a10ed42c91b616b1b4fe3f1b21e1b/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-02T15:24:04,026 INFO [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(1114): Opened 457a10ed42c91b616b1b4fe3f1b21e1b; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=74182591, jitterRate=0.1054067462682724}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-02T15:24:04,026 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 457a10ed42c91b616b1b4fe3f1b21e1b 2024-12-02T15:24:04,027 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(1093): writing seq id for efe68bf3b39c1068cde553fb241e761d 2024-12-02T15:24:04,027 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegion(1006): Region open journal for 457a10ed42c91b616b1b4fe3f1b21e1b: Running coprocessor pre-open hook at 1733153044018Writing region info on filesystem at 
1733153044018Initializing all the Stores at 1733153044019 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733153044019Cleaning up temporary data from old regions at 1733153044022 (+3 ms)Running coprocessor post-open hooks at 1733153044026 (+4 ms)Region opened successfully at 1733153044027 (+1 ms) 2024-12-02T15:24:04,028 INFO [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testEmptyExportFileSystemState,1,1733153043676.457a10ed42c91b616b1b4fe3f1b21e1b., pid=198, masterSystemTime=1733153044015 2024-12-02T15:24:04,029 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testEmptyExportFileSystemState,1,1733153043676.457a10ed42c91b616b1b4fe3f1b21e1b. 2024-12-02T15:24:04,030 INFO [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=198}] handler.AssignRegionHandler(153): Opened testtb-testEmptyExportFileSystemState,1,1733153043676.457a10ed42c91b616b1b4fe3f1b21e1b. 2024-12-02T15:24:04,030 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=197 updating hbase:meta row=457a10ed42c91b616b1b4fe3f1b21e1b, regionState=OPEN, openSeqNum=2, regionLocation=be0458f36726,43025,1733152773677 2024-12-02T15:24:04,032 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=198, ppid=197, state=RUNNABLE, hasLock=false; OpenRegionProcedure 457a10ed42c91b616b1b4fe3f1b21e1b, server=be0458f36726,43025,1733152773677 because future has completed 2024-12-02T15:24:04,036 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=198, resume processing ppid=197 2024-12-02T15:24:04,036 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=198, ppid=197, state=SUCCESS, hasLock=false; OpenRegionProcedure 457a10ed42c91b616b1b4fe3f1b21e1b, server=be0458f36726,43025,1733152773677 in 170 msec 2024-12-02T15:24:04,037 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=197, ppid=195, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=457a10ed42c91b616b1b4fe3f1b21e1b, ASSIGN in 328 msec 2024-12-02T15:24:04,044 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testEmptyExportFileSystemState/efe68bf3b39c1068cde553fb241e761d/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-02T15:24:04,045 INFO [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(1114): Opened efe68bf3b39c1068cde553fb241e761d; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=74390436, jitterRate=0.10850387811660767}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-02T15:24:04,045 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] 
regionserver.HRegion(1122): Running coprocessor post-open hooks for efe68bf3b39c1068cde553fb241e761d 2024-12-02T15:24:04,045 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegion(1006): Region open journal for efe68bf3b39c1068cde553fb241e761d: Running coprocessor pre-open hook at 1733153044018Writing region info on filesystem at 1733153044018Initializing all the Stores at 1733153044019 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733153044019Cleaning up temporary data from old regions at 1733153044025 (+6 ms)Running coprocessor post-open hooks at 1733153044045 (+20 ms)Region opened successfully at 1733153044045 2024-12-02T15:24:04,046 INFO [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testEmptyExportFileSystemState,,1733153043676.efe68bf3b39c1068cde553fb241e761d., pid=199, masterSystemTime=1733153044015 2024-12-02T15:24:04,048 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testEmptyExportFileSystemState,,1733153043676.efe68bf3b39c1068cde553fb241e761d. 2024-12-02T15:24:04,048 INFO [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=199}] handler.AssignRegionHandler(153): Opened testtb-testEmptyExportFileSystemState,,1733153043676.efe68bf3b39c1068cde553fb241e761d. 
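The region open journals above spell out the schema of column family 'cf' (VERSIONS => '1', BLOOMFILTER => 'ROW', BLOCKSIZE => '65536', no compression, no data block encoding, block cache enabled). A sketch of building an equivalent descriptor with ColumnFamilyDescriptorBuilder, assuming the standard client API; the class name is illustrative.

import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.io.compress.Compression;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class CfDescriptor {
  public static void main(String[] args) {
    // Mirrors the 'cf' schema printed in the region open journal above.
    ColumnFamilyDescriptor cf = ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
        .setMaxVersions(1)                               // VERSIONS => '1'
        .setBloomFilterType(BloomType.ROW)               // BLOOMFILTER => 'ROW'
        .setBlocksize(65536)                             // BLOCKSIZE => '65536'
        .setCompressionType(Compression.Algorithm.NONE)  // COMPRESSION => 'NONE'
        .setDataBlockEncoding(DataBlockEncoding.NONE)    // DATA_BLOCK_ENCODING => 'NONE'
        .setBlockCacheEnabled(true)                      // BLOCKCACHE => 'true'
        .setInMemory(false)                              // IN_MEMORY => 'false'
        .build();
    System.out.println(cf);
  }
}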
2024-12-02T15:24:04,049 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=196 updating hbase:meta row=efe68bf3b39c1068cde553fb241e761d, regionState=OPEN, openSeqNum=2, regionLocation=be0458f36726,35155,1733152773848 2024-12-02T15:24:04,051 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=199, ppid=196, state=RUNNABLE, hasLock=false; OpenRegionProcedure efe68bf3b39c1068cde553fb241e761d, server=be0458f36726,35155,1733152773848 because future has completed 2024-12-02T15:24:04,053 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=199, resume processing ppid=196 2024-12-02T15:24:04,053 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=199, ppid=196, state=SUCCESS, hasLock=false; OpenRegionProcedure efe68bf3b39c1068cde553fb241e761d, server=be0458f36726,35155,1733152773848 in 188 msec 2024-12-02T15:24:04,058 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=196, resume processing ppid=195 2024-12-02T15:24:04,058 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=196, ppid=195, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=efe68bf3b39c1068cde553fb241e761d, ASSIGN in 345 msec 2024-12-02T15:24:04,059 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=195, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testEmptyExportFileSystemState execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-02T15:24:04,059 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733153044059"}]},"ts":"1733153044059"} 2024-12-02T15:24:04,062 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testEmptyExportFileSystemState, state=ENABLED in hbase:meta 2024-12-02T15:24:04,063 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=195, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testEmptyExportFileSystemState execute state=CREATE_TABLE_POST_OPERATION 2024-12-02T15:24:04,063 DEBUG [PEWorker-4 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testEmptyExportFileSystemState jenkins: RWXCA 2024-12-02T15:24:04,066 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43025 {}] access.PermissionStorage(613): Read acl: entry[testtb-testEmptyExportFileSystemState], kv [jenkins: RWXCA] 2024-12-02T15:24:04,097 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34441-0x10197ce54a90000, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T15:24:04,097 DEBUG [pool-69-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45405-0x10197ce54a90002, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T15:24:04,097 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35155-0x10197ce54a90003, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T15:24:04,097 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43025-0x10197ce54a90001, 
quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T15:24:04,106 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF\x0AK\x0A\x07jenkins\x12@\x08\x03"<\x0A0\x0A\x07default\x12%testtb-testEmptyExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-12-02T15:24:04,106 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF\x0AK\x0A\x07jenkins\x12@\x08\x03"<\x0A0\x0A\x07default\x12%testtb-testEmptyExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-12-02T15:24:04,106 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-02T15:24:04,106 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF\x0AK\x0A\x07jenkins\x12@\x08\x03"<\x0A0\x0A\x07default\x12%testtb-testEmptyExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-12-02T15:24:04,106 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-02T15:24:04,106 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-02T15:24:04,107 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF\x0AK\x0A\x07jenkins\x12@\x08\x03"<\x0A0\x0A\x07default\x12%testtb-testEmptyExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-12-02T15:24:04,107 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-02T15:24:04,108 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=195, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testEmptyExportFileSystemState in 429 msec 2024-12-02T15:24:04,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=195 2024-12-02T15:24:04,304 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testEmptyExportFileSystemState completed 2024-12-02T15:24:04,305 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table testtb-testEmptyExportFileSystemState get assigned. 
Timeout = 60000ms 2024-12-02T15:24:04,305 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-02T15:24:04,314 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45405 {}] regionserver.StoreScanner(1138): Switch to stream read (scanned=32778 bytes) of info 2024-12-02T15:24:04,319 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table testtb-testEmptyExportFileSystemState assigned to meta. Checking AM states. 2024-12-02T15:24:04,319 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-02T15:24:04,319 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table testtb-testEmptyExportFileSystemState assigned. 2024-12-02T15:24:04,319 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testEmptyExportFileSystemState,, stopping at row=testtb-testEmptyExportFileSystemState ,, for max=2147483647 with caching=100 2024-12-02T15:24:04,321 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } 2024-12-02T15:24:04,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733153044321 (current time:1733153044321). 2024-12-02T15:24:04,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-02T15:24:04,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testEmptyExportFileSystemState VERSION not specified, setting to 2 2024-12-02T15:24:04,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-02T15:24:04,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@21f7f6ed, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T15:24:04,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] client.ClusterIdFetcher(90): Going to request be0458f36726,34441,-1 for getting cluster id 2024-12-02T15:24:04,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-02T15:24:04,323 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '8e50677f-f1c7-447a-8806-ffbc6b162076' 2024-12-02T15:24:04,323 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-02T15:24:04,323 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "8e50677f-f1c7-447a-8806-ffbc6b162076" 2024-12-02T15:24:04,323 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): 
Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7cc7e4fc, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T15:24:04,323 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [be0458f36726,34441,-1] 2024-12-02T15:24:04,323 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-02T15:24:04,323 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T15:24:04,324 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49218, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-02T15:24:04,324 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3c11c38c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T15:24:04,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-02T15:24:04,325 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=be0458f36726,45405,1733152773787, seqNum=-1] 2024-12-02T15:24:04,325 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T15:24:04,326 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50112, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T15:24:04,327 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441. 
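At 15:24:04,321 the master logs a client snapshot request "{ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 }". A minimal sketch of the client call that produces such a request, assuming the public Admin.snapshot API; the class name is illustrative.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotType;

public class TakeSnapshot {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Issues a master-side request like the one logged above:
      // { ss=emptySnaptb0-... table=testtb-... type=FLUSH ttl=0 }
      admin.snapshot("emptySnaptb0-testEmptyExportFileSystemState",
          TableName.valueOf("testtb-testEmptyExportFileSystemState"),
          SnapshotType.FLUSH);
    }
  }
}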
2024-12-02T15:24:04,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-02T15:24:04,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T15:24:04,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T15:24:04,327 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-02T15:24:04,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4aa853d6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T15:24:04,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] client.ClusterIdFetcher(90): Going to request be0458f36726,34441,-1 for getting cluster id 2024-12-02T15:24:04,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-02T15:24:04,329 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '8e50677f-f1c7-447a-8806-ffbc6b162076' 2024-12-02T15:24:04,329 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-02T15:24:04,329 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "8e50677f-f1c7-447a-8806-ffbc6b162076" 2024-12-02T15:24:04,329 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@f217f92, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T15:24:04,329 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [be0458f36726,34441,-1] 2024-12-02T15:24:04,329 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-02T15:24:04,329 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T15:24:04,330 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49248, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-02T15:24:04,330 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3956ea3a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T15:24:04,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-02T15:24:04,332 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=be0458f36726,45405,1733152773787, seqNum=-1] 2024-12-02T15:24:04,332 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T15:24:04,333 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50124, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T15:24:04,334 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testEmptyExportFileSystemState', locateType=CURRENT is [region=hbase:acl,,1733152776657.82c95700d9ff2efea2507bd84223c4b0., hostname=be0458f36726,43025,1733152773677, seqNum=2] 2024-12-02T15:24:04,334 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T15:24:04,335 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48930, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T15:24:04,336 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441. 
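The AsyncNonMetaRegionLocator record above resolves which hbase:acl region holds the row for testtb-testEmptyExportFileSystemState. The same lookup can be reproduced from a client with the synchronous RegionLocator API; a minimal sketch, with an illustrative class name, assuming the standard connection setup.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.util.Bytes;

public class LocateAclRow {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         RegionLocator locator = conn.getRegionLocator(TableName.valueOf("hbase:acl"))) {
      // Same lookup the locator performed above for the
      // 'testtb-testEmptyExportFileSystemState' row of hbase:acl.
      HRegionLocation loc =
          locator.getRegionLocation(Bytes.toBytes("testtb-testEmptyExportFileSystemState"));
      System.out.println(loc);
    }
  }
}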
2024-12-02T15:24:04,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor255.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-02T15:24:04,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T15:24:04,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T15:24:04,337 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-02T15:24:04,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] access.PermissionStorage(613): Read acl: entry[testtb-testEmptyExportFileSystemState], kv [jenkins: RWXCA] 2024-12-02T15:24:04,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
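The call stack above shows SnapshotDescriptionUtils.writeAclToSnapshotDescription reading the table's permissions through PermissionStorage (the "jenkins: RWXCA" entry) so that the ACL can travel with the snapshot. A hedged sketch of listing those entries from a client with AccessControlClient, assuming the AccessController coprocessor is installed as the earlier coprocessor records indicate; the class name is illustrative.

import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.security.access.AccessControlClient;
import org.apache.hadoop.hbase.security.access.UserPermission;

public class ShowTableAcls {
  public static void main(String[] args) throws Throwable {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf)) {
      // Lists entries like "jenkins: RWXCA" that PermissionStorage read above.
      List<UserPermission> perms =
          AccessControlClient.getUserPermissions(conn, "testtb-testEmptyExportFileSystemState");
      perms.forEach(p -> System.out.println(p));
    }
  }
}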
2024-12-02T15:24:04,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] procedure2.ProcedureExecutor(1139): Stored pid=200, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=200, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } 2024-12-02T15:24:04,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 200 2024-12-02T15:24:04,339 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=200, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=200, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-02T15:24:04,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=200 2024-12-02T15:24:04,340 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=200, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=200, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-02T15:24:04,343 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=200, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=200, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-02T15:24:04,348 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742234_1410 (size=185) 2024-12-02T15:24:04,348 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742234_1410 (size=185) 2024-12-02T15:24:04,348 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742234_1410 (size=185) 2024-12-02T15:24:04,349 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=200, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=200, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-02T15:24:04,349 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=201, ppid=200, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure efe68bf3b39c1068cde553fb241e761d}, {pid=202, ppid=200, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 457a10ed42c91b616b1b4fe3f1b21e1b}] 2024-12-02T15:24:04,350 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=201, ppid=200, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure efe68bf3b39c1068cde553fb241e761d 2024-12-02T15:24:04,350 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=202, ppid=200, state=RUNNABLE, hasLock=false; 
SnapshotRegionProcedure 457a10ed42c91b616b1b4fe3f1b21e1b 2024-12-02T15:24:04,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=200 2024-12-02T15:24:04,503 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43025 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=202 2024-12-02T15:24:04,503 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35155 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=201 2024-12-02T15:24:04,503 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=201}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testEmptyExportFileSystemState,,1733153043676.efe68bf3b39c1068cde553fb241e761d. 2024-12-02T15:24:04,503 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testEmptyExportFileSystemState,1,1733153043676.457a10ed42c91b616b1b4fe3f1b21e1b. 2024-12-02T15:24:04,504 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=201}] regionserver.HRegion(2603): Flush status journal for efe68bf3b39c1068cde553fb241e761d: 2024-12-02T15:24:04,504 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] regionserver.HRegion(2603): Flush status journal for 457a10ed42c91b616b1b4fe3f1b21e1b: 2024-12-02T15:24:04,504 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=201}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testEmptyExportFileSystemState,,1733153043676.efe68bf3b39c1068cde553fb241e761d. for emptySnaptb0-testEmptyExportFileSystemState completed. 2024-12-02T15:24:04,504 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testEmptyExportFileSystemState,1,1733153043676.457a10ed42c91b616b1b4fe3f1b21e1b. for emptySnaptb0-testEmptyExportFileSystemState completed. 2024-12-02T15:24:04,504 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] snapshot.SnapshotManifest(241): Storing 'testtb-testEmptyExportFileSystemState,1,1733153043676.457a10ed42c91b616b1b4fe3f1b21e1b.' region-info for snapshot=emptySnaptb0-testEmptyExportFileSystemState 2024-12-02T15:24:04,504 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=201}] snapshot.SnapshotManifest(241): Storing 'testtb-testEmptyExportFileSystemState,,1733153043676.efe68bf3b39c1068cde553fb241e761d.' 
region-info for snapshot=emptySnaptb0-testEmptyExportFileSystemState 2024-12-02T15:24:04,504 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-02T15:24:04,504 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=201}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-02T15:24:04,505 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-02T15:24:04,505 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=201}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-02T15:24:04,514 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742235_1411 (size=76) 2024-12-02T15:24:04,514 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742235_1411 (size=76) 2024-12-02T15:24:04,514 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742235_1411 (size=76) 2024-12-02T15:24:04,515 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testEmptyExportFileSystemState,1,1733153043676.457a10ed42c91b616b1b4fe3f1b21e1b. 2024-12-02T15:24:04,515 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=202}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=202 2024-12-02T15:24:04,515 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742236_1412 (size=76) 2024-12-02T15:24:04,515 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742236_1412 (size=76) 2024-12-02T15:24:04,515 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742236_1412 (size=76) 2024-12-02T15:24:04,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.HMaster(4169): Remote procedure done, pid=202 2024-12-02T15:24:04,516 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testEmptyExportFileSystemState on region 457a10ed42c91b616b1b4fe3f1b21e1b 2024-12-02T15:24:04,516 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=202, ppid=200, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 457a10ed42c91b616b1b4fe3f1b21e1b 2024-12-02T15:24:04,518 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=202, ppid=200, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 457a10ed42c91b616b1b4fe3f1b21e1b in 167 msec 2024-12-02T15:24:04,533 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=201}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testEmptyExportFileSystemState,,1733153043676.efe68bf3b39c1068cde553fb241e761d. 
2024-12-02T15:24:04,533 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=201}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=201 2024-12-02T15:24:04,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.HMaster(4169): Remote procedure done, pid=201 2024-12-02T15:24:04,534 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testEmptyExportFileSystemState on region efe68bf3b39c1068cde553fb241e761d 2024-12-02T15:24:04,534 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=201, ppid=200, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure efe68bf3b39c1068cde553fb241e761d 2024-12-02T15:24:04,538 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=201, resume processing ppid=200 2024-12-02T15:24:04,538 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=201, ppid=200, state=SUCCESS, hasLock=false; SnapshotRegionProcedure efe68bf3b39c1068cde553fb241e761d in 186 msec 2024-12-02T15:24:04,538 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=200, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=200, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-02T15:24:04,539 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=200, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=200, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-02T15:24:04,540 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=200, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=200, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-02T15:24:04,540 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testEmptyExportFileSystemState 2024-12-02T15:24:04,541 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/.hbase-snapshot/.tmp/emptySnaptb0-testEmptyExportFileSystemState 2024-12-02T15:24:04,550 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742237_1413 (size=567) 2024-12-02T15:24:04,550 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742237_1413 (size=567) 2024-12-02T15:24:04,550 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742237_1413 (size=567) 2024-12-02T15:24:04,552 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=200, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=200, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState 
type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-02T15:24:04,556 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=200, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=200, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-02T15:24:04,556 DEBUG [PEWorker-1 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/.hbase-snapshot/.tmp/emptySnaptb0-testEmptyExportFileSystemState to hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState 2024-12-02T15:24:04,557 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=200, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=200, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-02T15:24:04,557 DEBUG [PEWorker-1 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 200 2024-12-02T15:24:04,558 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=200, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=200, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } in 219 msec 2024-12-02T15:24:04,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=200 2024-12-02T15:24:04,653 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testEmptyExportFileSystemState completed 2024-12-02T15:24:04,656 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testEmptyExportFileSystemState', row='02951cd8e4b069641b406a7f368e7e592', locateType=CURRENT is [region=testtb-testEmptyExportFileSystemState,,1733153043676.efe68bf3b39c1068cde553fb241e761d., hostname=be0458f36726,35155,1733152773848, seqNum=2] 2024-12-02T15:24:04,657 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testEmptyExportFileSystemState', row='1b0914032bceae7f899cc9f1fbe39853d', locateType=CURRENT is [region=testtb-testEmptyExportFileSystemState,1,1733153043676.457a10ed42c91b616b1b4fe3f1b21e1b., hostname=be0458f36726,43025,1733152773677, seqNum=2] 2024-12-02T15:24:04,657 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testEmptyExportFileSystemState', row='2e7898b8703fdd050d550c1b1d0d2f9b6', locateType=CURRENT is [region=testtb-testEmptyExportFileSystemState,1,1733153043676.457a10ed42c91b616b1b4fe3f1b21e1b., hostname=be0458f36726,43025,1733152773677, seqNum=2] 2024-12-02T15:24:04,658 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testEmptyExportFileSystemState', row='36e281033e06c9dc68673867d6a64fe50', 
locateType=CURRENT is [region=testtb-testEmptyExportFileSystemState,1,1733153043676.457a10ed42c91b616b1b4fe3f1b21e1b., hostname=be0458f36726,43025,1733152773677, seqNum=2] 2024-12-02T15:24:04,659 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testEmptyExportFileSystemState', row='4450294b2270755450b2402f4d217df6a', locateType=CURRENT is [region=testtb-testEmptyExportFileSystemState,1,1733153043676.457a10ed42c91b616b1b4fe3f1b21e1b., hostname=be0458f36726,43025,1733152773677, seqNum=2] 2024-12-02T15:24:04,659 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testEmptyExportFileSystemState', row='5f17c55f2468a8e1a27512f78483149e2', locateType=CURRENT is [region=testtb-testEmptyExportFileSystemState,1,1733153043676.457a10ed42c91b616b1b4fe3f1b21e1b., hostname=be0458f36726,43025,1733152773677, seqNum=2] 2024-12-02T15:24:04,661 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35155 {}] regionserver.HRegion(8528): writing data to region testtb-testEmptyExportFileSystemState,,1733153043676.efe68bf3b39c1068cde553fb241e761d. with WAL disabled. Data may be lost in the event of a crash. 2024-12-02T15:24:04,663 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43025 {}] regionserver.HRegion(8528): writing data to region testtb-testEmptyExportFileSystemState,1,1733153043676.457a10ed42c91b616b1b4fe3f1b21e1b. with WAL disabled. Data may be lost in the event of a crash. 2024-12-02T15:24:04,663 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testEmptyExportFileSystemState,, stopping at row=testtb-testEmptyExportFileSystemState ,, for max=2147483647 with caching=100 2024-12-02T15:24:04,666 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testEmptyExportFileSystemState 2024-12-02T15:24:04,666 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testEmptyExportFileSystemState,,1733153043676.efe68bf3b39c1068cde553fb241e761d. 2024-12-02T15:24:04,666 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-02T15:24:04,668 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testEmptyExportFileSystemState,, stopping at row=testtb-testEmptyExportFileSystemState ,, for max=2147483647 with caching=100 2024-12-02T15:24:04,673 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testEmptyExportFileSystemState,, stopping at row=testtb-testEmptyExportFileSystemState ,, for max=2147483647 with caching=100 2024-12-02T15:24:04,678 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testEmptyExportFileSystemState,, stopping at row=testtb-testEmptyExportFileSystemState ,, for max=2147483647 with caching=100 2024-12-02T15:24:04,681 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } 2024-12-02T15:24:04,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733153044681 (current time:1733153044681). 
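The request logged just above, { ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 }, is what the master receives when a client asks for a flush snapshot through the Admin API, after which SnapshotDescriptionUtils fills in the unset creation time, TTL, version and owner. A minimal client-side sketch of issuing such a request; only the snapshot and table names are taken from the log, the class name and the rest of the setup are illustrative and not the test's actual code:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotDescription;
import org.apache.hadoop.hbase.client.SnapshotType;

public class TakeFlushSnapshot {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // A FLUSH-type snapshot, matching the "type=FLUSH ttl=0" request above;
      // creation time, TTL and owner are left for the master to default.
      admin.snapshot(new SnapshotDescription(
          "snaptb0-testEmptyExportFileSystemState",
          TableName.valueOf("testtb-testEmptyExportFileSystemState"),
          SnapshotType.FLUSH));
    }
  }
}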
2024-12-02T15:24:04,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-02T15:24:04,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testEmptyExportFileSystemState VERSION not specified, setting to 2 2024-12-02T15:24:04,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-02T15:24:04,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1230b304, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T15:24:04,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] client.ClusterIdFetcher(90): Going to request be0458f36726,34441,-1 for getting cluster id 2024-12-02T15:24:04,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-02T15:24:04,682 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '8e50677f-f1c7-447a-8806-ffbc6b162076' 2024-12-02T15:24:04,682 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-02T15:24:04,682 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "8e50677f-f1c7-447a-8806-ffbc6b162076" 2024-12-02T15:24:04,683 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1294d3aa, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T15:24:04,683 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [be0458f36726,34441,-1] 2024-12-02T15:24:04,683 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-02T15:24:04,683 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T15:24:04,684 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49262, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-02T15:24:04,684 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6098381, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T15:24:04,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-02T15:24:04,685 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 
{}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=be0458f36726,45405,1733152773787, seqNum=-1] 2024-12-02T15:24:04,685 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T15:24:04,686 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50132, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T15:24:04,687 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441. 2024-12-02T15:24:04,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-02T15:24:04,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T15:24:04,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T15:24:04,687 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
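The bursts of ClusterIdFetcher / ConnectionRegistry / meta-location lines above and below come from short-lived connections the master opens internally while validating the snapshot (security check, then ACL lookup). The same bootstrap sequence is produced by any async client connection; a hedged sketch of that pattern, assuming the standard HBase async client API and using the hbase:acl table that appears further down in the log (the class name and row key are illustrative):

import java.util.concurrent.CompletableFuture;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.AsyncConnection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class AsyncConnectionSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Creating the async connection triggers the cluster-id fetch and the
    // meta region location lookup seen in the surrounding log lines.
    CompletableFuture<AsyncConnection> future = ConnectionFactory.createAsyncConnection(conf);
    try (AsyncConnection conn = future.get()) {
      conn.getRegionLocator(TableName.valueOf("hbase:acl"))
          .getRegionLocation(new byte[0])
          .thenAccept(loc -> System.out.println("hbase:acl served by " + loc.getServerName()))
          .join();
    }
  }
}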
2024-12-02T15:24:04,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1fb019f8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T15:24:04,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] client.ClusterIdFetcher(90): Going to request be0458f36726,34441,-1 for getting cluster id 2024-12-02T15:24:04,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-02T15:24:04,689 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '8e50677f-f1c7-447a-8806-ffbc6b162076' 2024-12-02T15:24:04,689 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-02T15:24:04,689 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "8e50677f-f1c7-447a-8806-ffbc6b162076" 2024-12-02T15:24:04,689 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7ebf898a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T15:24:04,689 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [be0458f36726,34441,-1] 2024-12-02T15:24:04,690 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-02T15:24:04,690 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T15:24:04,690 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49278, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-02T15:24:04,691 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5fb782f0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T15:24:04,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-02T15:24:04,692 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=be0458f36726,45405,1733152773787, seqNum=-1] 2024-12-02T15:24:04,692 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T15:24:04,693 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50138, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
2024-12-02T15:24:04,694 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testEmptyExportFileSystemState', locateType=CURRENT is [region=hbase:acl,,1733152776657.82c95700d9ff2efea2507bd84223c4b0., hostname=be0458f36726,43025,1733152773677, seqNum=2] 2024-12-02T15:24:04,695 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T15:24:04,696 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48944, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T15:24:04,697 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441. 2024-12-02T15:24:04,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor255.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-02T15:24:04,697 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T15:24:04,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T15:24:04,697 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-02T15:24:04,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] access.PermissionStorage(613): Read acl: entry[testtb-testEmptyExportFileSystemState], kv [jenkins: RWXCA] 2024-12-02T15:24:04,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-12-02T15:24:04,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] procedure2.ProcedureExecutor(1139): Stored pid=203, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } 2024-12-02T15:24:04,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 203 2024-12-02T15:24:04,699 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-02T15:24:04,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=203 2024-12-02T15:24:04,700 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-02T15:24:04,702 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-02T15:24:04,710 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742238_1414 (size=180) 2024-12-02T15:24:04,710 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742238_1414 (size=180) 2024-12-02T15:24:04,710 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742238_1414 (size=180) 2024-12-02T15:24:04,711 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute 
state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-02T15:24:04,712 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=204, ppid=203, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure efe68bf3b39c1068cde553fb241e761d}, {pid=205, ppid=203, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 457a10ed42c91b616b1b4fe3f1b21e1b}] 2024-12-02T15:24:04,712 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=205, ppid=203, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 457a10ed42c91b616b1b4fe3f1b21e1b 2024-12-02T15:24:04,712 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=204, ppid=203, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure efe68bf3b39c1068cde553fb241e761d 2024-12-02T15:24:04,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=203 2024-12-02T15:24:04,864 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43025 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=205 2024-12-02T15:24:04,864 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35155 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=204 2024-12-02T15:24:04,864 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testEmptyExportFileSystemState,1,1733153043676.457a10ed42c91b616b1b4fe3f1b21e1b. 2024-12-02T15:24:04,864 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testEmptyExportFileSystemState,,1733153043676.efe68bf3b39c1068cde553fb241e761d. 
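The SnapshotRegionCallable entries above (pid=204 and pid=205) flush each region's memstore before file references are taken, which is what the HRegion "Flushing ..." lines that follow record. The same flush can also be requested explicitly through the Admin API; a small sketch, with only the table name taken from the log and the surrounding setup illustrative:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class ExplicitFlush {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Force every memstore of the table into HFiles, the same per-region
      // effect a FLUSH-type snapshot has before it collects file references.
      admin.flush(TableName.valueOf("testtb-testEmptyExportFileSystemState"));
    }
  }
}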
2024-12-02T15:24:04,864 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.HRegion(2902): Flushing efe68bf3b39c1068cde553fb241e761d 1/1 column families, dataSize=199 B heapSize=688 B 2024-12-02T15:24:04,864 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.HRegion(2902): Flushing 457a10ed42c91b616b1b4fe3f1b21e1b 1/1 column families, dataSize=3.06 KB heapSize=6.86 KB 2024-12-02T15:24:04,879 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testEmptyExportFileSystemState/efe68bf3b39c1068cde553fb241e761d/.tmp/cf/d9d8f836e1804b368780e4839ffdd0ac is 71, key is 05ce49833cfb6ae1e77437d153723feb/cf:q/1733153044661/Put/seqid=0 2024-12-02T15:24:04,879 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testEmptyExportFileSystemState/457a10ed42c91b616b1b4fe3f1b21e1b/.tmp/cf/b96bde7e93f54a23a686c58a2615773b is 71, key is 11bea532da6d4498a987c52a3be69834/cf:q/1733153044662/Put/seqid=0 2024-12-02T15:24:04,884 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742239_1415 (size=8324) 2024-12-02T15:24:04,884 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742240_1416 (size=5288) 2024-12-02T15:24:04,884 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742240_1416 (size=5288) 2024-12-02T15:24:04,884 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742240_1416 (size=5288) 2024-12-02T15:24:04,885 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742239_1415 (size=8324) 2024-12-02T15:24:04,885 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742239_1415 (size=8324) 2024-12-02T15:24:04,885 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=199 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testEmptyExportFileSystemState/efe68bf3b39c1068cde553fb241e761d/.tmp/cf/d9d8f836e1804b368780e4839ffdd0ac 2024-12-02T15:24:04,885 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.06 KB at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testEmptyExportFileSystemState/457a10ed42c91b616b1b4fe3f1b21e1b/.tmp/cf/b96bde7e93f54a23a686c58a2615773b 2024-12-02T15:24:04,889 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testEmptyExportFileSystemState/efe68bf3b39c1068cde553fb241e761d/.tmp/cf/d9d8f836e1804b368780e4839ffdd0ac as hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testEmptyExportFileSystemState/efe68bf3b39c1068cde553fb241e761d/cf/d9d8f836e1804b368780e4839ffdd0ac 2024-12-02T15:24:04,889 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testEmptyExportFileSystemState/457a10ed42c91b616b1b4fe3f1b21e1b/.tmp/cf/b96bde7e93f54a23a686c58a2615773b as hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testEmptyExportFileSystemState/457a10ed42c91b616b1b4fe3f1b21e1b/cf/b96bde7e93f54a23a686c58a2615773b 2024-12-02T15:24:04,894 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testEmptyExportFileSystemState/efe68bf3b39c1068cde553fb241e761d/cf/d9d8f836e1804b368780e4839ffdd0ac, entries=3, sequenceid=6, filesize=5.2 K 2024-12-02T15:24:04,894 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testEmptyExportFileSystemState/457a10ed42c91b616b1b4fe3f1b21e1b/cf/b96bde7e93f54a23a686c58a2615773b, entries=47, sequenceid=6, filesize=8.1 K 2024-12-02T15:24:04,894 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.HRegion(3140): Finished flush of dataSize ~199 B/199, heapSize ~672 B/672, currentSize=0 B/0 for efe68bf3b39c1068cde553fb241e761d in 30ms, sequenceid=6, compaction requested=false 2024-12-02T15:24:04,895 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testEmptyExportFileSystemState' 2024-12-02T15:24:04,895 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.HRegion(3140): Finished flush of dataSize ~3.06 KB/3137, heapSize ~6.84 KB/7008, currentSize=0 B/0 for 457a10ed42c91b616b1b4fe3f1b21e1b in 30ms, sequenceid=6, compaction requested=false 2024-12-02T15:24:04,895 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testEmptyExportFileSystemState' 2024-12-02T15:24:04,895 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.HRegion(2603): Flush status journal for efe68bf3b39c1068cde553fb241e761d: 2024-12-02T15:24:04,895 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testEmptyExportFileSystemState,,1733153043676.efe68bf3b39c1068cde553fb241e761d. for snaptb0-testEmptyExportFileSystemState completed. 
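At this point the flushed HFiles have been moved out of .tmp and committed to the stores, and the snapshot manifest is about to reference them. Once the procedure completes, the result can be inspected from a client; a hedged sketch of listing the snapshots for this table (the pattern and class name are illustrative, not part of the test):

import java.util.regex.Pattern;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotDescription;

public class ListTestSnapshots {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Both emptySnaptb0-... and snaptb0-... should show up once their
      // SnapshotProcedures have finished.
      for (SnapshotDescription sd :
           admin.listSnapshots(Pattern.compile(".*testEmptyExportFileSystemState"))) {
        System.out.println(sd.getName() + " on " + sd.getTableName());
      }
    }
  }
}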
2024-12-02T15:24:04,895 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.HRegion(2603): Flush status journal for 457a10ed42c91b616b1b4fe3f1b21e1b: 2024-12-02T15:24:04,895 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testEmptyExportFileSystemState,1,1733153043676.457a10ed42c91b616b1b4fe3f1b21e1b. for snaptb0-testEmptyExportFileSystemState completed. 2024-12-02T15:24:04,895 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] snapshot.SnapshotManifest(241): Storing 'testtb-testEmptyExportFileSystemState,1,1733153043676.457a10ed42c91b616b1b4fe3f1b21e1b.' region-info for snapshot=snaptb0-testEmptyExportFileSystemState 2024-12-02T15:24:04,895 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] snapshot.SnapshotManifest(241): Storing 'testtb-testEmptyExportFileSystemState,,1733153043676.efe68bf3b39c1068cde553fb241e761d.' region-info for snapshot=snaptb0-testEmptyExportFileSystemState 2024-12-02T15:24:04,895 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-02T15:24:04,895 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-02T15:24:04,895 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testEmptyExportFileSystemState/457a10ed42c91b616b1b4fe3f1b21e1b/cf/b96bde7e93f54a23a686c58a2615773b] hfiles 2024-12-02T15:24:04,895 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testEmptyExportFileSystemState/efe68bf3b39c1068cde553fb241e761d/cf/d9d8f836e1804b368780e4839ffdd0ac] hfiles 2024-12-02T15:24:04,895 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testEmptyExportFileSystemState/457a10ed42c91b616b1b4fe3f1b21e1b/cf/b96bde7e93f54a23a686c58a2615773b for snapshot=snaptb0-testEmptyExportFileSystemState 2024-12-02T15:24:04,895 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testEmptyExportFileSystemState/efe68bf3b39c1068cde553fb241e761d/cf/d9d8f836e1804b368780e4839ffdd0ac for snapshot=snaptb0-testEmptyExportFileSystemState 2024-12-02T15:24:04,913 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742241_1417 (size=115) 2024-12-02T15:24:04,913 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:32893 is added to blk_1073742241_1417 (size=115) 2024-12-02T15:24:04,913 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742241_1417 (size=115) 2024-12-02T15:24:04,913 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testEmptyExportFileSystemState,,1733153043676.efe68bf3b39c1068cde553fb241e761d. 2024-12-02T15:24:04,913 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=204 2024-12-02T15:24:04,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.HMaster(4169): Remote procedure done, pid=204 2024-12-02T15:24:04,914 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testEmptyExportFileSystemState on region efe68bf3b39c1068cde553fb241e761d 2024-12-02T15:24:04,914 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=204, ppid=203, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure efe68bf3b39c1068cde553fb241e761d 2024-12-02T15:24:04,916 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=204, ppid=203, state=SUCCESS, hasLock=false; SnapshotRegionProcedure efe68bf3b39c1068cde553fb241e761d in 204 msec 2024-12-02T15:24:04,916 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742242_1418 (size=115) 2024-12-02T15:24:04,916 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742242_1418 (size=115) 2024-12-02T15:24:04,917 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742242_1418 (size=115) 2024-12-02T15:24:04,917 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testEmptyExportFileSystemState,1,1733153043676.457a10ed42c91b616b1b4fe3f1b21e1b. 
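While the region subprocedures run, the master keeps answering "Checking to see if procedure is done pid=203" (the MasterRpcServices(1377) lines), which is the client-side procedure future polling for completion. A similar wait can be written against the public Admin API; a sketch under the assumption that simple polling of isSnapshotFinished is acceptable for the caller, with names reused from the log:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotDescription;
import org.apache.hadoop.hbase.client.SnapshotType;

public class WaitForSnapshot {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      SnapshotDescription snap = new SnapshotDescription(
          "snaptb0-testEmptyExportFileSystemState",
          TableName.valueOf("testtb-testEmptyExportFileSystemState"),
          SnapshotType.FLUSH);
      // Poll the master until the snapshot procedure has completed.
      while (!admin.isSnapshotFinished(snap)) {
        Thread.sleep(200);
      }
    }
  }
}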
2024-12-02T15:24:04,917 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=205 2024-12-02T15:24:04,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.HMaster(4169): Remote procedure done, pid=205 2024-12-02T15:24:04,917 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testEmptyExportFileSystemState on region 457a10ed42c91b616b1b4fe3f1b21e1b 2024-12-02T15:24:04,918 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=205, ppid=203, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 457a10ed42c91b616b1b4fe3f1b21e1b 2024-12-02T15:24:04,920 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=205, resume processing ppid=203 2024-12-02T15:24:04,920 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-02T15:24:04,920 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=205, ppid=203, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 457a10ed42c91b616b1b4fe3f1b21e1b in 207 msec 2024-12-02T15:24:04,921 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-02T15:24:04,922 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-02T15:24:04,922 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testEmptyExportFileSystemState 2024-12-02T15:24:04,923 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/.hbase-snapshot/.tmp/snaptb0-testEmptyExportFileSystemState 2024-12-02T15:24:04,934 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742243_1419 (size=645) 2024-12-02T15:24:04,935 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742243_1419 (size=645) 2024-12-02T15:24:04,935 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742243_1419 (size=645) 2024-12-02T15:24:04,944 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute 
state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-02T15:24:04,950 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-02T15:24:04,951 DEBUG [PEWorker-5 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/.hbase-snapshot/.tmp/snaptb0-testEmptyExportFileSystemState to hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/.hbase-snapshot/snaptb0-testEmptyExportFileSystemState 2024-12-02T15:24:04,952 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-02T15:24:04,952 DEBUG [PEWorker-5 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 203 2024-12-02T15:24:04,953 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=203, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } in 254 msec 2024-12-02T15:24:05,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=203 2024-12-02T15:24:05,013 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testEmptyExportFileSystemState completed 2024-12-02T15:24:05,013 INFO [Time-limited test {}] snapshot.TestExportSnapshot(515): HDFS export destination path: hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/export-test/export-1733153045013 2024-12-02T15:24:05,013 INFO [Time-limited test {}] snapshot.TestExportSnapshot(542): tgtFsUri=hdfs://localhost:36329, tgtDir=hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/export-test/export-1733153045013, rawTgtDir=hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/export-test/export-1733153045013, srcFsUri=hdfs://localhost:36329, srcDir=hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657 2024-12-02T15:24:05,044 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1094): inputFs=hdfs://localhost:36329, inputRoot=hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657 2024-12-02T15:24:05,044 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1095): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-205930424_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/export-test/export-1733153045013, skipTmp=false, 
initialOutputSnapshotDir=hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/export-test/export-1733153045013/.hbase-snapshot/.tmp/emptySnaptb0-testEmptyExportFileSystemState 2024-12-02T15:24:05,046 INFO [Time-limited test {}] snapshot.ExportSnapshot(1104): Verify the source snapshot's expiration status and integrity. 2024-12-02T15:24:05,049 INFO [Time-limited test {}] snapshot.ExportSnapshot(1162): Copy Snapshot Manifest from hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState to hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/export-test/export-1733153045013/.hbase-snapshot/.tmp/emptySnaptb0-testEmptyExportFileSystemState 2024-12-02T15:24:05,064 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742244_1420 (size=185) 2024-12-02T15:24:05,064 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742244_1420 (size=185) 2024-12-02T15:24:05,064 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742244_1420 (size=185) 2024-12-02T15:24:05,065 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742245_1421 (size=567) 2024-12-02T15:24:05,066 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742245_1421 (size=567) 2024-12-02T15:24:05,066 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742245_1421 (size=567) 2024-12-02T15:24:05,067 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-common/target/hbase-common-3.0.0-beta-2-SNAPSHOT.jar 2024-12-02T15:24:05,068 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-protocol-shaded/target/hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar 2024-12-02T15:24:05,068 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-client/target/hbase-client-3.0.0-beta-2-SNAPSHOT.jar 2024-12-02T15:24:06,049 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/44e720fa-d04f-d823-6564-55fab94a4e60/hadoop-13985237423912168232.jar 2024-12-02T15:24:06,049 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-hadoop-compat/target/hbase-hadoop-compat-3.0.0-beta-2-SNAPSHOT.jar 2024-12-02T15:24:06,050 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-hadoop-compat/target/hbase-hadoop-compat-3.0.0-beta-2-SNAPSHOT.jar 2024-12-02T15:24:06,105 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/44e720fa-d04f-d823-6564-55fab94a4e60/hadoop-1873992655446417771.jar 2024-12-02T15:24:06,105 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-metrics/target/hbase-metrics-3.0.0-beta-2-SNAPSHOT.jar 2024-12-02T15:24:06,105 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-metrics-api/target/hbase-metrics-api-3.0.0-beta-2-SNAPSHOT.jar 2024-12-02T15:24:06,106 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-replication/target/hbase-replication-3.0.0-beta-2-SNAPSHOT.jar 2024-12-02T15:24:06,106 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-http/target/hbase-http-3.0.0-beta-2-SNAPSHOT.jar 2024-12-02T15:24:06,106 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-procedure/target/hbase-procedure-3.0.0-beta-2-SNAPSHOT.jar 2024-12-02T15:24:06,107 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-zookeeper/target/hbase-zookeeper-3.0.0-beta-2-SNAPSHOT.jar 2024-12-02T15:24:06,107 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-02T15:24:06,107 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-02T15:24:06,107 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-02T15:24:06,107 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-02T15:24:06,108 DEBUG [Time-limited test 
{}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-02T15:24:06,108 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-02T15:24:06,108 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-02T15:24:06,108 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-02T15:24:06,108 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-02T15:24:06,109 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-02T15:24:06,109 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-02T15:24:06,109 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-02T15:24:06,109 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-02T15:24:06,110 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-02T15:24:06,110 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-02T15:24:06,110 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-02T15:24:06,110 DEBUG 
[Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-02T15:24:06,110 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-02T15:24:06,163 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742246_1422 (size=24020) 2024-12-02T15:24:06,163 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742246_1422 (size=24020) 2024-12-02T15:24:06,164 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742246_1422 (size=24020) 2024-12-02T15:24:06,184 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742247_1423 (size=77755) 2024-12-02T15:24:06,184 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742247_1423 (size=77755) 2024-12-02T15:24:06,185 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742247_1423 (size=77755) 2024-12-02T15:24:06,203 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742248_1424 (size=131360) 2024-12-02T15:24:06,203 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742248_1424 (size=131360) 2024-12-02T15:24:06,204 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742248_1424 (size=131360) 2024-12-02T15:24:06,225 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742249_1425 (size=111793) 2024-12-02T15:24:06,225 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742249_1425 (size=111793) 2024-12-02T15:24:06,226 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742249_1425 (size=111793) 2024-12-02T15:24:06,249 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742250_1426 (size=1832290) 2024-12-02T15:24:06,250 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742250_1426 (size=1832290) 2024-12-02T15:24:06,250 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742250_1426 (size=1832290) 2024-12-02T15:24:06,294 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742251_1427 (size=8360005) 2024-12-02T15:24:06,294 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742251_1427 (size=8360005) 2024-12-02T15:24:06,294 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742251_1427 (size=8360005) 2024-12-02T15:24:06,315 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742252_1428 (size=503880) 2024-12-02T15:24:06,315 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742252_1428 (size=503880) 2024-12-02T15:24:06,316 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742252_1428 (size=503880) 2024-12-02T15:24:06,335 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742253_1429 (size=322274) 2024-12-02T15:24:06,335 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742253_1429 (size=322274) 2024-12-02T15:24:06,336 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742253_1429 (size=322274) 2024-12-02T15:24:06,354 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742254_1430 (size=20406) 2024-12-02T15:24:06,354 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742254_1430 (size=20406) 2024-12-02T15:24:06,354 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742254_1430 (size=20406) 2024-12-02T15:24:06,374 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742255_1431 (size=45609) 2024-12-02T15:24:06,377 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742255_1431 (size=45609) 2024-12-02T15:24:06,377 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742255_1431 (size=45609) 2024-12-02T15:24:06,400 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742256_1432 (size=136454) 2024-12-02T15:24:06,400 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742256_1432 (size=136454) 2024-12-02T15:24:06,400 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742256_1432 (size=136454) 2024-12-02T15:24:06,431 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742257_1433 (size=1597136) 2024-12-02T15:24:06,431 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742257_1433 (size=1597136) 2024-12-02T15:24:06,431 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742257_1433 (size=1597136) 2024-12-02T15:24:06,439 INFO [Block report processor 
{}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742258_1434 (size=30873) 2024-12-02T15:24:06,440 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742258_1434 (size=30873) 2024-12-02T15:24:06,440 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742258_1434 (size=30873) 2024-12-02T15:24:06,458 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742259_1435 (size=29229) 2024-12-02T15:24:06,459 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742259_1435 (size=29229) 2024-12-02T15:24:06,459 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742259_1435 (size=29229) 2024-12-02T15:24:06,478 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742260_1436 (size=903849) 2024-12-02T15:24:06,479 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742260_1436 (size=903849) 2024-12-02T15:24:06,479 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742260_1436 (size=903849) 2024-12-02T15:24:06,516 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742261_1437 (size=5175431) 2024-12-02T15:24:06,516 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742261_1437 (size=5175431) 2024-12-02T15:24:06,517 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742261_1437 (size=5175431) 2024-12-02T15:24:06,535 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742262_1438 (size=232881) 2024-12-02T15:24:06,535 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742262_1438 (size=232881) 2024-12-02T15:24:06,535 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742262_1438 (size=232881) 2024-12-02T15:24:06,556 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742263_1439 (size=1323991) 2024-12-02T15:24:06,556 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742263_1439 (size=1323991) 2024-12-02T15:24:06,559 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742263_1439 (size=1323991) 2024-12-02T15:24:06,606 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742264_1440 (size=4695811) 2024-12-02T15:24:06,607 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742264_1440 (size=4695811) 2024-12-02T15:24:06,607 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742264_1440 (size=4695811) 2024-12-02T15:24:06,638 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742265_1441 (size=1877034) 2024-12-02T15:24:06,638 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742265_1441 (size=1877034) 2024-12-02T15:24:06,638 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742265_1441 (size=1877034) 2024-12-02T15:24:06,652 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742266_1442 (size=217555) 2024-12-02T15:24:06,653 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742266_1442 (size=217555) 2024-12-02T15:24:06,653 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742266_1442 (size=217555) 2024-12-02T15:24:06,688 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742267_1443 (size=6424745) 2024-12-02T15:24:06,688 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742267_1443 (size=6424745) 2024-12-02T15:24:06,689 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742267_1443 (size=6424745) 2024-12-02T15:24:06,709 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742268_1444 (size=4188619) 2024-12-02T15:24:06,709 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742268_1444 (size=4188619) 2024-12-02T15:24:06,709 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742268_1444 (size=4188619) 2024-12-02T15:24:06,723 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742269_1445 (size=127628) 2024-12-02T15:24:06,724 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742269_1445 (size=127628) 2024-12-02T15:24:06,724 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742269_1445 (size=127628) 2024-12-02T15:24:06,736 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742270_1446 (size=443171) 2024-12-02T15:24:06,736 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742270_1446 (size=443171) 2024-12-02T15:24:06,736 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742270_1446 (size=443171) 2024-12-02T15:24:06,738 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 
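
The "No job jar file set" warning from JobResourceUploader just above is emitted while ExportSnapshot submits its MapReduce copy job; the entries that follow show the tool loading the snapshot's hfile list and, a few seconds later, finalizing and verifying the export. Purely as an illustrative sketch (not the test's own code), an equivalent standalone invocation might look like the block below. The class name ExportSnapshotRunner is hypothetical, the destination URI is copied from the export root that appears later in this log, and the exact flags and entry point may vary by HBase version.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
    import org.apache.hadoop.util.ToolRunner;

    public class ExportSnapshotRunner {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Export the (empty) snapshot to a separate HDFS directory; this submits
        // the MapReduce job whose block writes and warnings appear in this log.
        int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
            "--snapshot", "emptySnaptb0-testEmptyExportFileSystemState",
            "--copy-to",
            "hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/export-test/export-1733153045013"
        });
        System.exit(rc);
      }
    }
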
2024-12-02T15:24:06,740 INFO [Time-limited test {}] snapshot.ExportSnapshot(663): Loading Snapshot 'emptySnaptb0-testEmptyExportFileSystemState' hfile list 2024-12-02T15:24:06,747 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742271_1447 (size=7) 2024-12-02T15:24:06,747 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742271_1447 (size=7) 2024-12-02T15:24:06,748 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742271_1447 (size=7) 2024-12-02T15:24:06,758 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742272_1448 (size=10) 2024-12-02T15:24:06,758 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742272_1448 (size=10) 2024-12-02T15:24:06,758 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742272_1448 (size=10) 2024-12-02T15:24:06,773 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742273_1449 (size=303986) 2024-12-02T15:24:06,773 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742273_1449 (size=303986) 2024-12-02T15:24:06,773 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742273_1449 (size=303986) 2024-12-02T15:24:06,815 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-02T15:24:06,815 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-12-02T15:24:06,826 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733152780087_0008_000001 (auth:SIMPLE) from 127.0.0.1:50666 2024-12-02T15:24:08,812 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-02T15:24:11,769 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733152780087_0008_000001 (auth:SIMPLE) from 127.0.0.1:32892 2024-12-02T15:24:11,951 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742274_1450 (size=349660) 2024-12-02T15:24:11,951 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742274_1450 (size=349660) 2024-12-02T15:24:11,952 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742274_1450 (size=349660) 2024-12-02T15:24:12,859 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742275_1451 (size=8568) 2024-12-02T15:24:12,859 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742275_1451 (size=8568) 2024-12-02T15:24:12,860 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742275_1451 (size=8568) 2024-12-02T15:24:12,868 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742276_1452 (size=460) 2024-12-02T15:24:12,868 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742276_1452 (size=460) 2024-12-02T15:24:12,868 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742276_1452 (size=460) 2024-12-02T15:24:12,894 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742277_1453 (size=8568) 2024-12-02T15:24:12,895 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742277_1453 (size=8568) 2024-12-02T15:24:12,895 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742277_1453 (size=8568) 2024-12-02T15:24:12,910 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742278_1454 (size=349660) 2024-12-02T15:24:12,910 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742278_1454 (size=349660) 2024-12-02T15:24:12,910 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742278_1454 (size=349660) 2024-12-02T15:24:13,093 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testEmptyExportFileSystemState 2024-12-02T15:24:13,093 INFO [HBase-Metrics2-1 {}] 
impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testEmptyExportFileSystemState Metrics about Tables on a single HBase RegionServer 2024-12-02T15:24:13,095 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportExpiredSnapshot 2024-12-02T15:24:14,918 INFO [Time-limited test {}] snapshot.ExportSnapshot(1219): Finalize the Snapshot Export 2024-12-02T15:24:14,919 INFO [Time-limited test {}] snapshot.ExportSnapshot(1230): Verify the exported snapshot's expiration status and integrity. 2024-12-02T15:24:14,924 INFO [Time-limited test {}] snapshot.ExportSnapshot(1236): Export Completed: emptySnaptb0-testEmptyExportFileSystemState 2024-12-02T15:24:14,924 INFO [Time-limited test {}] snapshot.TestExportSnapshot(409): Exported snapshot 2024-12-02T15:24:14,925 INFO [Time-limited test {}] snapshot.TestExportSnapshot(420): Verified filesystem state 2024-12-02T15:24:14,925 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-205930424_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState at hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState 2024-12-02T15:24:14,925 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState/.snapshotinfo 2024-12-02T15:24:14,925 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState/data.manifest 2024-12-02T15:24:14,925 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-205930424_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/export-test/export-1733153045013/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState at hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/export-test/export-1733153045013/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState 2024-12-02T15:24:14,925 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/export-test/export-1733153045013/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState/.snapshotinfo 2024-12-02T15:24:14,925 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/export-test/export-1733153045013/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState/data.manifest 2024-12-02T15:24:14,932 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testEmptyExportFileSystemState 2024-12-02T15:24:14,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] procedure2.ProcedureExecutor(1139): Stored pid=206, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure 
table=testtb-testEmptyExportFileSystemState 2024-12-02T15:24:14,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=206 2024-12-02T15:24:14,935 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733153054935"}]},"ts":"1733153054935"} 2024-12-02T15:24:14,936 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testEmptyExportFileSystemState, state=DISABLING in hbase:meta 2024-12-02T15:24:14,937 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(284): Set testtb-testEmptyExportFileSystemState to state=DISABLING 2024-12-02T15:24:14,937 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=207, ppid=206, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testEmptyExportFileSystemState}] 2024-12-02T15:24:14,939 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=208, ppid=207, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=efe68bf3b39c1068cde553fb241e761d, UNASSIGN}, {pid=209, ppid=207, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=457a10ed42c91b616b1b4fe3f1b21e1b, UNASSIGN}] 2024-12-02T15:24:14,939 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=209, ppid=207, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=457a10ed42c91b616b1b4fe3f1b21e1b, UNASSIGN 2024-12-02T15:24:14,939 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=208, ppid=207, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=efe68bf3b39c1068cde553fb241e761d, UNASSIGN 2024-12-02T15:24:14,940 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=209 updating hbase:meta row=457a10ed42c91b616b1b4fe3f1b21e1b, regionState=CLOSING, regionLocation=be0458f36726,43025,1733152773677 2024-12-02T15:24:14,940 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=208 updating hbase:meta row=efe68bf3b39c1068cde553fb241e761d, regionState=CLOSING, regionLocation=be0458f36726,35155,1733152773848 2024-12-02T15:24:14,942 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=209, ppid=207, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=457a10ed42c91b616b1b4fe3f1b21e1b, UNASSIGN because future has completed 2024-12-02T15:24:14,942 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-02T15:24:14,942 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=210, ppid=209, state=RUNNABLE, hasLock=false; CloseRegionProcedure 457a10ed42c91b616b1b4fe3f1b21e1b, server=be0458f36726,43025,1733152773677}] 2024-12-02T15:24:14,942 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=208, ppid=207, 
state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=efe68bf3b39c1068cde553fb241e761d, UNASSIGN because future has completed 2024-12-02T15:24:14,943 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-02T15:24:14,943 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=211, ppid=208, state=RUNNABLE, hasLock=false; CloseRegionProcedure efe68bf3b39c1068cde553fb241e761d, server=be0458f36726,35155,1733152773848}] 2024-12-02T15:24:15,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=206 2024-12-02T15:24:15,095 INFO [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=210}] handler.UnassignRegionHandler(122): Close 457a10ed42c91b616b1b4fe3f1b21e1b 2024-12-02T15:24:15,095 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=210}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-02T15:24:15,095 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=210}] regionserver.HRegion(1722): Closing 457a10ed42c91b616b1b4fe3f1b21e1b, disabling compactions & flushes 2024-12-02T15:24:15,095 INFO [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=210}] regionserver.HRegion(1755): Closing region testtb-testEmptyExportFileSystemState,1,1733153043676.457a10ed42c91b616b1b4fe3f1b21e1b. 2024-12-02T15:24:15,095 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=210}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testEmptyExportFileSystemState,1,1733153043676.457a10ed42c91b616b1b4fe3f1b21e1b. 2024-12-02T15:24:15,095 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=210}] regionserver.HRegion(1843): Acquired close lock on testtb-testEmptyExportFileSystemState,1,1733153043676.457a10ed42c91b616b1b4fe3f1b21e1b. after waiting 0 ms 2024-12-02T15:24:15,095 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=210}] regionserver.HRegion(1853): Updates disabled for region testtb-testEmptyExportFileSystemState,1,1733153043676.457a10ed42c91b616b1b4fe3f1b21e1b. 2024-12-02T15:24:15,096 INFO [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=211}] handler.UnassignRegionHandler(122): Close efe68bf3b39c1068cde553fb241e761d 2024-12-02T15:24:15,096 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=211}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-02T15:24:15,096 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=211}] regionserver.HRegion(1722): Closing efe68bf3b39c1068cde553fb241e761d, disabling compactions & flushes 2024-12-02T15:24:15,096 INFO [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=211}] regionserver.HRegion(1755): Closing region testtb-testEmptyExportFileSystemState,,1733153043676.efe68bf3b39c1068cde553fb241e761d. 
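
Everything from the DisableTableProcedure (pid=206) down to the two CloseRegionProcedure handlers above is driven by a single client call; the master fans out the region-close subprocedures itself, and the repeated "Checking to see if procedure is done pid=206" lines are the client polling that procedure for completion. A minimal client-side sketch of that call, with connection details assumed and the class name DisableTableExample hypothetical:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class DisableTableExample {
      public static void main(String[] args) throws Exception {
        TableName table = TableName.valueOf("testtb-testEmptyExportFileSystemState");
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // Blocks until the master's DisableTableProcedure and its
          // region-close subprocedures finish, i.e. until the table is DISABLED.
          admin.disableTable(table);
        }
      }
    }
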
2024-12-02T15:24:15,096 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=211}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testEmptyExportFileSystemState,,1733153043676.efe68bf3b39c1068cde553fb241e761d. 2024-12-02T15:24:15,096 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=211}] regionserver.HRegion(1843): Acquired close lock on testtb-testEmptyExportFileSystemState,,1733153043676.efe68bf3b39c1068cde553fb241e761d. after waiting 0 ms 2024-12-02T15:24:15,096 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=211}] regionserver.HRegion(1853): Updates disabled for region testtb-testEmptyExportFileSystemState,,1733153043676.efe68bf3b39c1068cde553fb241e761d. 2024-12-02T15:24:15,099 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=210}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testEmptyExportFileSystemState/457a10ed42c91b616b1b4fe3f1b21e1b/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-02T15:24:15,099 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=211}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testEmptyExportFileSystemState/efe68bf3b39c1068cde553fb241e761d/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-02T15:24:15,099 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=210}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-02T15:24:15,099 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=211}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-02T15:24:15,099 INFO [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=210}] regionserver.HRegion(1973): Closed testtb-testEmptyExportFileSystemState,1,1733153043676.457a10ed42c91b616b1b4fe3f1b21e1b. 2024-12-02T15:24:15,100 INFO [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=211}] regionserver.HRegion(1973): Closed testtb-testEmptyExportFileSystemState,,1733153043676.efe68bf3b39c1068cde553fb241e761d. 
2024-12-02T15:24:15,100 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=210}] regionserver.HRegion(1676): Region close journal for 457a10ed42c91b616b1b4fe3f1b21e1b: Waiting for close lock at 1733153055095Running coprocessor pre-close hooks at 1733153055095Disabling compacts and flushes for region at 1733153055095Disabling writes for close at 1733153055095Writing region close event to WAL at 1733153055096 (+1 ms)Running coprocessor post-close hooks at 1733153055099 (+3 ms)Closed at 1733153055099 2024-12-02T15:24:15,100 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=211}] regionserver.HRegion(1676): Region close journal for efe68bf3b39c1068cde553fb241e761d: Waiting for close lock at 1733153055096Running coprocessor pre-close hooks at 1733153055096Disabling compacts and flushes for region at 1733153055096Disabling writes for close at 1733153055096Writing region close event to WAL at 1733153055097 (+1 ms)Running coprocessor post-close hooks at 1733153055099 (+2 ms)Closed at 1733153055099 2024-12-02T15:24:15,102 INFO [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=211}] handler.UnassignRegionHandler(157): Closed efe68bf3b39c1068cde553fb241e761d 2024-12-02T15:24:15,102 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=208 updating hbase:meta row=efe68bf3b39c1068cde553fb241e761d, regionState=CLOSED 2024-12-02T15:24:15,102 INFO [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=210}] handler.UnassignRegionHandler(157): Closed 457a10ed42c91b616b1b4fe3f1b21e1b 2024-12-02T15:24:15,103 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=209 updating hbase:meta row=457a10ed42c91b616b1b4fe3f1b21e1b, regionState=CLOSED 2024-12-02T15:24:15,104 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=211, ppid=208, state=RUNNABLE, hasLock=false; CloseRegionProcedure efe68bf3b39c1068cde553fb241e761d, server=be0458f36726,35155,1733152773848 because future has completed 2024-12-02T15:24:15,105 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=210, ppid=209, state=RUNNABLE, hasLock=false; CloseRegionProcedure 457a10ed42c91b616b1b4fe3f1b21e1b, server=be0458f36726,43025,1733152773677 because future has completed 2024-12-02T15:24:15,108 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=211, resume processing ppid=208 2024-12-02T15:24:15,108 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=211, ppid=208, state=SUCCESS, hasLock=false; CloseRegionProcedure efe68bf3b39c1068cde553fb241e761d, server=be0458f36726,35155,1733152773848 in 162 msec 2024-12-02T15:24:15,109 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=210, resume processing ppid=209 2024-12-02T15:24:15,109 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=208, ppid=207, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=efe68bf3b39c1068cde553fb241e761d, UNASSIGN in 169 msec 2024-12-02T15:24:15,109 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=210, ppid=209, state=SUCCESS, hasLock=false; CloseRegionProcedure 457a10ed42c91b616b1b4fe3f1b21e1b, server=be0458f36726,43025,1733152773677 in 164 msec 2024-12-02T15:24:15,111 INFO [PEWorker-1 {}] 
procedure2.ProcedureExecutor(2017): Finished subprocedure pid=209, resume processing ppid=207 2024-12-02T15:24:15,111 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=209, ppid=207, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=457a10ed42c91b616b1b4fe3f1b21e1b, UNASSIGN in 170 msec 2024-12-02T15:24:15,113 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=207, resume processing ppid=206 2024-12-02T15:24:15,113 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=207, ppid=206, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testEmptyExportFileSystemState in 175 msec 2024-12-02T15:24:15,114 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733153055114"}]},"ts":"1733153055114"} 2024-12-02T15:24:15,115 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testEmptyExportFileSystemState, state=DISABLED in hbase:meta 2024-12-02T15:24:15,116 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(296): Set testtb-testEmptyExportFileSystemState to state=DISABLED 2024-12-02T15:24:15,117 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=206, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testEmptyExportFileSystemState in 184 msec 2024-12-02T15:24:15,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=206 2024-12-02T15:24:15,254 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testEmptyExportFileSystemState completed 2024-12-02T15:24:15,255 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testEmptyExportFileSystemState 2024-12-02T15:24:15,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] procedure2.ProcedureExecutor(1139): Stored pid=212, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState 2024-12-02T15:24:15,260 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=212, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState 2024-12-02T15:24:15,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testEmptyExportFileSystemState 2024-12-02T15:24:15,262 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=212, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState 2024-12-02T15:24:15,264 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43025 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testEmptyExportFileSystemState 2024-12-02T15:24:15,265 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testEmptyExportFileSystemState/457a10ed42c91b616b1b4fe3f1b21e1b 2024-12-02T15:24:15,265 DEBUG [HFileArchiver-23 {}] 
backup.HFileArchiver(131): ARCHIVING hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testEmptyExportFileSystemState/efe68bf3b39c1068cde553fb241e761d 2024-12-02T15:24:15,268 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testEmptyExportFileSystemState/457a10ed42c91b616b1b4fe3f1b21e1b/cf, FileablePath, hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testEmptyExportFileSystemState/457a10ed42c91b616b1b4fe3f1b21e1b/recovered.edits] 2024-12-02T15:24:15,268 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testEmptyExportFileSystemState/efe68bf3b39c1068cde553fb241e761d/cf, FileablePath, hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testEmptyExportFileSystemState/efe68bf3b39c1068cde553fb241e761d/recovered.edits] 2024-12-02T15:24:15,320 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testEmptyExportFileSystemState/efe68bf3b39c1068cde553fb241e761d/cf/d9d8f836e1804b368780e4839ffdd0ac to hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/archive/data/default/testtb-testEmptyExportFileSystemState/efe68bf3b39c1068cde553fb241e761d/cf/d9d8f836e1804b368780e4839ffdd0ac 2024-12-02T15:24:15,320 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testEmptyExportFileSystemState/457a10ed42c91b616b1b4fe3f1b21e1b/cf/b96bde7e93f54a23a686c58a2615773b to hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/archive/data/default/testtb-testEmptyExportFileSystemState/457a10ed42c91b616b1b4fe3f1b21e1b/cf/b96bde7e93f54a23a686c58a2615773b 2024-12-02T15:24:15,325 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testEmptyExportFileSystemState/efe68bf3b39c1068cde553fb241e761d/recovered.edits/9.seqid to hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/archive/data/default/testtb-testEmptyExportFileSystemState/efe68bf3b39c1068cde553fb241e761d/recovered.edits/9.seqid 2024-12-02T15:24:15,325 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testEmptyExportFileSystemState/457a10ed42c91b616b1b4fe3f1b21e1b/recovered.edits/9.seqid to hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/archive/data/default/testtb-testEmptyExportFileSystemState/457a10ed42c91b616b1b4fe3f1b21e1b/recovered.edits/9.seqid 2024-12-02T15:24:15,325 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testEmptyExportFileSystemState/efe68bf3b39c1068cde553fb241e761d 2024-12-02T15:24:15,339 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(610): Deleted 
hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testEmptyExportFileSystemState/457a10ed42c91b616b1b4fe3f1b21e1b 2024-12-02T15:24:15,339 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(313): Archived testtb-testEmptyExportFileSystemState regions 2024-12-02T15:24:15,342 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=212, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState 2024-12-02T15:24:15,346 WARN [PEWorker-3 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testEmptyExportFileSystemState from hbase:meta 2024-12-02T15:24:15,350 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testEmptyExportFileSystemState' descriptor. 2024-12-02T15:24:15,351 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=212, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState 2024-12-02T15:24:15,351 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testEmptyExportFileSystemState' from region states. 2024-12-02T15:24:15,352 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState,,1733153043676.efe68bf3b39c1068cde553fb241e761d.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733153055351"}]},"ts":"9223372036854775807"} 2024-12-02T15:24:15,352 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState,1,1733153043676.457a10ed42c91b616b1b4fe3f1b21e1b.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733153055351"}]},"ts":"9223372036854775807"} 2024-12-02T15:24:15,354 INFO [PEWorker-3 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-12-02T15:24:15,354 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => efe68bf3b39c1068cde553fb241e761d, NAME => 'testtb-testEmptyExportFileSystemState,,1733153043676.efe68bf3b39c1068cde553fb241e761d.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 457a10ed42c91b616b1b4fe3f1b21e1b, NAME => 'testtb-testEmptyExportFileSystemState,1,1733153043676.457a10ed42c91b616b1b4fe3f1b21e1b.', STARTKEY => '1', ENDKEY => ''}] 2024-12-02T15:24:15,354 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testEmptyExportFileSystemState' as deleted. 
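
The HFileArchiver entries above show DeleteTableProcedure (pid=212) moving each region's cf store file and recovered.edits under archive/data/default/... before the table is removed from hbase:meta; the later entries remove the two snapshots that referenced the table. On the client side this corresponds roughly to the calls below (sketch only; the class name DeleteTableExample is hypothetical):

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class DeleteTableExample {
      public static void main(String[] args) throws Exception {
        TableName table = TableName.valueOf("testtb-testEmptyExportFileSystemState");
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          if (admin.isTableDisabled(table)) {
            // DeleteTableProcedure: archive region files, clean hbase:meta, drop ACLs.
            admin.deleteTable(table);
          }
          // Matching the "delete name: ..." master requests later in this log.
          admin.deleteSnapshot("emptySnaptb0-testEmptyExportFileSystemState");
          admin.deleteSnapshot("snaptb0-testEmptyExportFileSystemState");
        }
      }
    }
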
2024-12-02T15:24:15,354 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733153055354"}]},"ts":"9223372036854775807"} 2024-12-02T15:24:15,356 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testEmptyExportFileSystemState state from META 2024-12-02T15:24:15,356 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(133): Finished pid=212, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState 2024-12-02T15:24:15,357 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=212, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState in 101 msec 2024-12-02T15:24:15,363 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43025-0x10197ce54a90001, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-12-02T15:24:15,363 DEBUG [pool-69-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45405-0x10197ce54a90002, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-12-02T15:24:15,363 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35155-0x10197ce54a90003, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-12-02T15:24:15,363 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34441-0x10197ce54a90000, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-12-02T15:24:15,364 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF 2024-12-02T15:24:15,364 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF 2024-12-02T15:24:15,364 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF 2024-12-02T15:24:15,364 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF 2024-12-02T15:24:15,371 DEBUG [pool-69-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45405-0x10197ce54a90002, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-12-02T15:24:15,371 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43025-0x10197ce54a90001, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-12-02T15:24:15,371 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34441-0x10197ce54a90000, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, 
path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-12-02T15:24:15,371 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35155-0x10197ce54a90003, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-12-02T15:24:15,371 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43025-0x10197ce54a90001, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T15:24:15,371 DEBUG [pool-69-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45405-0x10197ce54a90002, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T15:24:15,371 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34441-0x10197ce54a90000, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T15:24:15,371 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35155-0x10197ce54a90003, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T15:24:15,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=212 2024-12-02T15:24:15,372 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-02T15:24:15,372 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-02T15:24:15,372 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-02T15:24:15,372 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-02T15:24:15,372 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testEmptyExportFileSystemState 2024-12-02T15:24:15,372 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testEmptyExportFileSystemState completed 2024-12-02T15:24:15,377 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testEmptyExportFileSystemState" type: DISABLED 2024-12-02T15:24:15,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testEmptyExportFileSystemState 2024-12-02T15:24:15,379 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] 
master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testEmptyExportFileSystemState" type: DISABLED 2024-12-02T15:24:15,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testEmptyExportFileSystemState 2024-12-02T15:24:15,402 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestSecureExportSnapshot#testEmptyExportFileSystemState Thread=821 (was 811) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-205930424_22 at /127.0.0.1:50340 [Waiting for operation #6] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: process reaper (pid 135669) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Thread-6699 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:35507 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) 
java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-205930424_22 at /127.0.0.1:58992 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #14 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (834363022) connection to localhost/127.0.0.1:39823 from appattempt_1733152780087_0008_000001 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-205930424_22 at /127.0.0.1:42398 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) 
app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1483383305_1 at /127.0.0.1:33100 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1483383305_1 at /127.0.0.1:50434 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (834363022) connection to localhost/127.0.0.1:35507 from jenkins java.base@17.0.11/java.lang.Object.wait(Native 
Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: HFileArchiver-23 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=821 (was 795) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=548 (was 583), ProcessCount=19 (was 13) - ProcessCount LEAK? -, AvailableMemoryMB=1047 (was 1981) 2024-12-02T15:24:15,402 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=821 is superior to 500 2024-12-02T15:24:15,420 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestSecureExportSnapshot#testExportWithChecksum Thread=821, OpenFileDescriptor=821, MaxFileDescriptor=1048576, SystemLoadAverage=548, ProcessCount=19, AvailableMemoryMB=1047 2024-12-02T15:24:15,420 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=821 is superior to 500 2024-12-02T15:24:15,422 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testExportWithChecksum', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-02T15:24:15,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] procedure2.ProcedureExecutor(1139): Stored pid=213, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportWithChecksum 2024-12-02T15:24:15,423 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=213, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportWithChecksum execute state=CREATE_TABLE_PRE_OPERATION 2024-12-02T15:24:15,423 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T15:24:15,424 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportWithChecksum" procId is: 213 2024-12-02T15:24:15,424 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=213, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportWithChecksum execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-02T15:24:15,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.MasterRpcServices(1377): 
Checking to see if procedure is done pid=213 2024-12-02T15:24:15,430 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742279_1455 (size=404) 2024-12-02T15:24:15,430 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742279_1455 (size=404) 2024-12-02T15:24:15,431 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742279_1455 (size=404) 2024-12-02T15:24:15,433 INFO [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 6625c05dd9746ea58549adddbe4c0b35, NAME => 'testtb-testExportWithChecksum,,1733153055421.6625c05dd9746ea58549adddbe4c0b35.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportWithChecksum', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657 2024-12-02T15:24:15,433 INFO [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => dfad3ba1f868d6526e2daed67068d47a, NAME => 'testtb-testExportWithChecksum,1,1733153055421.dfad3ba1f868d6526e2daed67068d47a.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportWithChecksum', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657 2024-12-02T15:24:15,444 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742280_1456 (size=65) 2024-12-02T15:24:15,444 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742280_1456 (size=65) 2024-12-02T15:24:15,444 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742280_1456 (size=65) 2024-12-02T15:24:15,444 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742281_1457 (size=65) 2024-12-02T15:24:15,444 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742281_1457 (size=65) 2024-12-02T15:24:15,445 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742281_1457 (size=65) 2024-12-02T15:24:15,445 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportWithChecksum,1,1733153055421.dfad3ba1f868d6526e2daed67068d47a.; 
StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T15:24:15,445 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1722): Closing dfad3ba1f868d6526e2daed67068d47a, disabling compactions & flushes 2024-12-02T15:24:15,445 INFO [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportWithChecksum,1,1733153055421.dfad3ba1f868d6526e2daed67068d47a. 2024-12-02T15:24:15,445 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithChecksum,1,1733153055421.dfad3ba1f868d6526e2daed67068d47a. 2024-12-02T15:24:15,445 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithChecksum,1,1733153055421.dfad3ba1f868d6526e2daed67068d47a. after waiting 0 ms 2024-12-02T15:24:15,445 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithChecksum,1,1733153055421.dfad3ba1f868d6526e2daed67068d47a. 2024-12-02T15:24:15,445 INFO [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportWithChecksum,1,1733153055421.dfad3ba1f868d6526e2daed67068d47a. 2024-12-02T15:24:15,445 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1676): Region close journal for dfad3ba1f868d6526e2daed67068d47a: Waiting for close lock at 1733153055445Disabling compacts and flushes for region at 1733153055445Disabling writes for close at 1733153055445Writing region close event to WAL at 1733153055445Closed at 1733153055445 2024-12-02T15:24:15,445 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportWithChecksum,,1733153055421.6625c05dd9746ea58549adddbe4c0b35.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T15:24:15,445 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1722): Closing 6625c05dd9746ea58549adddbe4c0b35, disabling compactions & flushes 2024-12-02T15:24:15,445 INFO [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportWithChecksum,,1733153055421.6625c05dd9746ea58549adddbe4c0b35. 2024-12-02T15:24:15,445 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithChecksum,,1733153055421.6625c05dd9746ea58549adddbe4c0b35. 2024-12-02T15:24:15,445 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithChecksum,,1733153055421.6625c05dd9746ea58549adddbe4c0b35. after waiting 0 ms 2024-12-02T15:24:15,445 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithChecksum,,1733153055421.6625c05dd9746ea58549adddbe4c0b35. 2024-12-02T15:24:15,445 INFO [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportWithChecksum,,1733153055421.6625c05dd9746ea58549adddbe4c0b35. 
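The 'create testtb-testExportWithChecksum' request and the two RegionOpenAndInit entries above describe a pre-split table with a single 'cf' family (VERSIONS=1, BLOOMFILTER=ROW, BLOCKSIZE=64KB) and a split point at row key '1'. A minimal client-side sketch of an equivalent request using the standard HBase Admin API is shown below; it is illustrative only, and the configuration/connection setup is assumed rather than taken from this test.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateChecksumTestTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Column family 'cf' mirroring the attributes printed in the create request above.
      TableDescriptor table = TableDescriptorBuilder
          .newBuilder(TableName.valueOf("testtb-testExportWithChecksum"))
          .setRegionReplication(1)
          .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
              .setMaxVersions(1)
              .setBloomFilterType(BloomType.ROW)
              .setBlocksize(64 * 1024)
              .build())
          .build();
      // Pre-split at row key '1' so two regions ('' - '1' and '1' - '') are created,
      // matching the two RegionOpenAndInit entries in the log.
      admin.createTable(table, new byte[][] { Bytes.toBytes("1") });
    }
  }
}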
2024-12-02T15:24:15,445 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1676): Region close journal for 6625c05dd9746ea58549adddbe4c0b35: Waiting for close lock at 1733153055445Disabling compacts and flushes for region at 1733153055445Disabling writes for close at 1733153055445Writing region close event to WAL at 1733153055445Closed at 1733153055445 2024-12-02T15:24:15,446 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=213, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportWithChecksum execute state=CREATE_TABLE_ADD_TO_META 2024-12-02T15:24:15,447 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportWithChecksum,1,1733153055421.dfad3ba1f868d6526e2daed67068d47a.","families":{"info":[{"qualifier":"regioninfo","vlen":64,"tag":[],"timestamp":"1733153055446"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733153055446"}]},"ts":"1733153055446"} 2024-12-02T15:24:15,447 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportWithChecksum,,1733153055421.6625c05dd9746ea58549adddbe4c0b35.","families":{"info":[{"qualifier":"regioninfo","vlen":64,"tag":[],"timestamp":"1733153055446"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733153055446"}]},"ts":"1733153055446"} 2024-12-02T15:24:15,448 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-12-02T15:24:15,449 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=213, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportWithChecksum execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-02T15:24:15,449 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithChecksum","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733153055449"}]},"ts":"1733153055449"} 2024-12-02T15:24:15,450 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithChecksum, state=ENABLING in hbase:meta 2024-12-02T15:24:15,451 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(204): Hosts are {be0458f36726=0} racks are {/default-rack=0} 2024-12-02T15:24:15,451 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-02T15:24:15,452 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-02T15:24:15,452 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-02T15:24:15,452 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-02T15:24:15,452 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-02T15:24:15,452 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-02T15:24:15,452 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-02T15:24:15,452 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-02T15:24:15,452 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-02T15:24:15,452 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-02T15:24:15,452 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=214, ppid=213, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; 
TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=6625c05dd9746ea58549adddbe4c0b35, ASSIGN}, {pid=215, ppid=213, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=dfad3ba1f868d6526e2daed67068d47a, ASSIGN}] 2024-12-02T15:24:15,453 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=215, ppid=213, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=dfad3ba1f868d6526e2daed67068d47a, ASSIGN 2024-12-02T15:24:15,453 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=214, ppid=213, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=6625c05dd9746ea58549adddbe4c0b35, ASSIGN 2024-12-02T15:24:15,453 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(269): Starting pid=215, ppid=213, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=dfad3ba1f868d6526e2daed67068d47a, ASSIGN; state=OFFLINE, location=be0458f36726,43025,1733152773677; forceNewPlan=false, retain=false 2024-12-02T15:24:15,453 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=214, ppid=213, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=6625c05dd9746ea58549adddbe4c0b35, ASSIGN; state=OFFLINE, location=be0458f36726,35155,1733152773848; forceNewPlan=false, retain=false 2024-12-02T15:24:15,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=213 2024-12-02T15:24:15,604 INFO [be0458f36726:34441 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
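The two ASSIGN procedures above (pid=214/215) and the balancer plan that follows place the new regions on region servers. Once they complete, a client can observe where each region landed; a small, hypothetical sketch using the public RegionLocator API (an already-open Connection is assumed):

import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.RegionLocator;

final class PrintAssignmentsSketch {
  // Lists each region of the new table together with the server it was assigned to.
  static void printAssignments(Connection conn) throws Exception {
    try (RegionLocator locator =
        conn.getRegionLocator(TableName.valueOf("testtb-testExportWithChecksum"))) {
      for (HRegionLocation loc : locator.getAllRegionLocations()) {
        System.out.println(loc.getRegion().getRegionNameAsString()
            + " -> " + loc.getServerName());
      }
    }
  }
}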
2024-12-02T15:24:15,605 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=215 updating hbase:meta row=dfad3ba1f868d6526e2daed67068d47a, regionState=OPENING, regionLocation=be0458f36726,43025,1733152773677 2024-12-02T15:24:15,605 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=214 updating hbase:meta row=6625c05dd9746ea58549adddbe4c0b35, regionState=OPENING, regionLocation=be0458f36726,35155,1733152773848 2024-12-02T15:24:15,609 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=215, ppid=213, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=dfad3ba1f868d6526e2daed67068d47a, ASSIGN because future has completed 2024-12-02T15:24:15,610 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=216, ppid=215, state=RUNNABLE, hasLock=false; OpenRegionProcedure dfad3ba1f868d6526e2daed67068d47a, server=be0458f36726,43025,1733152773677}] 2024-12-02T15:24:15,612 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=214, ppid=213, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=6625c05dd9746ea58549adddbe4c0b35, ASSIGN because future has completed 2024-12-02T15:24:15,613 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=217, ppid=214, state=RUNNABLE, hasLock=false; OpenRegionProcedure 6625c05dd9746ea58549adddbe4c0b35, server=be0458f36726,35155,1733152773848}] 2024-12-02T15:24:15,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=213 2024-12-02T15:24:15,772 INFO [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] handler.AssignRegionHandler(132): Open testtb-testExportWithChecksum,,1733153055421.6625c05dd9746ea58549adddbe4c0b35. 2024-12-02T15:24:15,772 INFO [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] handler.AssignRegionHandler(132): Open testtb-testExportWithChecksum,1,1733153055421.dfad3ba1f868d6526e2daed67068d47a. 2024-12-02T15:24:15,772 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegion(7752): Opening region: {ENCODED => dfad3ba1f868d6526e2daed67068d47a, NAME => 'testtb-testExportWithChecksum,1,1733153055421.dfad3ba1f868d6526e2daed67068d47a.', STARTKEY => '1', ENDKEY => ''} 2024-12-02T15:24:15,772 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegion(7752): Opening region: {ENCODED => 6625c05dd9746ea58549adddbe4c0b35, NAME => 'testtb-testExportWithChecksum,,1733153055421.6625c05dd9746ea58549adddbe4c0b35.', STARTKEY => '', ENDKEY => '1'} 2024-12-02T15:24:15,773 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportWithChecksum,1,1733153055421.dfad3ba1f868d6526e2daed67068d47a. service=AccessControlService 2024-12-02T15:24:15,773 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportWithChecksum,,1733153055421.6625c05dd9746ea58549adddbe4c0b35. 
service=AccessControlService 2024-12-02T15:24:15,773 INFO [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-02T15:24:15,773 INFO [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-02T15:24:15,774 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithChecksum 6625c05dd9746ea58549adddbe4c0b35 2024-12-02T15:24:15,774 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithChecksum dfad3ba1f868d6526e2daed67068d47a 2024-12-02T15:24:15,774 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegion(898): Instantiated testtb-testExportWithChecksum,,1733153055421.6625c05dd9746ea58549adddbe4c0b35.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T15:24:15,774 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegion(898): Instantiated testtb-testExportWithChecksum,1,1733153055421.dfad3ba1f868d6526e2daed67068d47a.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T15:24:15,774 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegion(7794): checking encryption for dfad3ba1f868d6526e2daed67068d47a 2024-12-02T15:24:15,774 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegion(7794): checking encryption for 6625c05dd9746ea58549adddbe4c0b35 2024-12-02T15:24:15,774 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegion(7797): checking classloading for dfad3ba1f868d6526e2daed67068d47a 2024-12-02T15:24:15,774 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegion(7797): checking classloading for 6625c05dd9746ea58549adddbe4c0b35 2024-12-02T15:24:15,776 INFO [StoreOpener-6625c05dd9746ea58549adddbe4c0b35-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 6625c05dd9746ea58549adddbe4c0b35 2024-12-02T15:24:15,776 INFO [StoreOpener-dfad3ba1f868d6526e2daed67068d47a-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region dfad3ba1f868d6526e2daed67068d47a 2024-12-02T15:24:15,776 INFO [StoreOpener-6625c05dd9746ea58549adddbe4c0b35-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files 
[minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 6625c05dd9746ea58549adddbe4c0b35 columnFamilyName cf 2024-12-02T15:24:15,777 INFO [StoreOpener-dfad3ba1f868d6526e2daed67068d47a-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region dfad3ba1f868d6526e2daed67068d47a columnFamilyName cf 2024-12-02T15:24:15,777 DEBUG [StoreOpener-6625c05dd9746ea58549adddbe4c0b35-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T15:24:15,777 DEBUG [StoreOpener-dfad3ba1f868d6526e2daed67068d47a-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T15:24:15,777 INFO [StoreOpener-dfad3ba1f868d6526e2daed67068d47a-1 {}] regionserver.HStore(327): Store=dfad3ba1f868d6526e2daed67068d47a/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T15:24:15,777 INFO [StoreOpener-6625c05dd9746ea58549adddbe4c0b35-1 {}] regionserver.HStore(327): Store=6625c05dd9746ea58549adddbe4c0b35/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T15:24:15,777 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegion(1038): replaying wal for dfad3ba1f868d6526e2daed67068d47a 2024-12-02T15:24:15,777 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegion(1038): replaying wal for 6625c05dd9746ea58549adddbe4c0b35 2024-12-02T15:24:15,777 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportWithChecksum/dfad3ba1f868d6526e2daed67068d47a 2024-12-02T15:24:15,777 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportWithChecksum/6625c05dd9746ea58549adddbe4c0b35 2024-12-02T15:24:15,778 DEBUG 
[RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportWithChecksum/dfad3ba1f868d6526e2daed67068d47a 2024-12-02T15:24:15,778 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportWithChecksum/6625c05dd9746ea58549adddbe4c0b35 2024-12-02T15:24:15,778 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegion(1048): stopping wal replay for dfad3ba1f868d6526e2daed67068d47a 2024-12-02T15:24:15,778 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegion(1048): stopping wal replay for 6625c05dd9746ea58549adddbe4c0b35 2024-12-02T15:24:15,778 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegion(1060): Cleaning up temporary data for dfad3ba1f868d6526e2daed67068d47a 2024-12-02T15:24:15,778 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegion(1060): Cleaning up temporary data for 6625c05dd9746ea58549adddbe4c0b35 2024-12-02T15:24:15,779 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegion(1093): writing seq id for dfad3ba1f868d6526e2daed67068d47a 2024-12-02T15:24:15,779 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegion(1093): writing seq id for 6625c05dd9746ea58549adddbe4c0b35 2024-12-02T15:24:15,780 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportWithChecksum/dfad3ba1f868d6526e2daed67068d47a/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-02T15:24:15,780 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportWithChecksum/6625c05dd9746ea58549adddbe4c0b35/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-02T15:24:15,781 INFO [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegion(1114): Opened dfad3ba1f868d6526e2daed67068d47a; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=64803063, jitterRate=-0.03435911238193512}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-02T15:24:15,781 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegion(1122): Running coprocessor post-open hooks for dfad3ba1f868d6526e2daed67068d47a 2024-12-02T15:24:15,781 INFO [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegion(1114): Opened 6625c05dd9746ea58549adddbe4c0b35; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, 
ConstantSizeRegionSplitPolicy{desiredMaxFileSize=67283861, jitterRate=0.0026076585054397583}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-02T15:24:15,781 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 6625c05dd9746ea58549adddbe4c0b35 2024-12-02T15:24:15,781 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegion(1006): Region open journal for 6625c05dd9746ea58549adddbe4c0b35: Running coprocessor pre-open hook at 1733153055774Writing region info on filesystem at 1733153055774Initializing all the Stores at 1733153055775 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733153055775Cleaning up temporary data from old regions at 1733153055778 (+3 ms)Running coprocessor post-open hooks at 1733153055781 (+3 ms)Region opened successfully at 1733153055781 2024-12-02T15:24:15,781 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegion(1006): Region open journal for dfad3ba1f868d6526e2daed67068d47a: Running coprocessor pre-open hook at 1733153055774Writing region info on filesystem at 1733153055774Initializing all the Stores at 1733153055775 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733153055775Cleaning up temporary data from old regions at 1733153055778 (+3 ms)Running coprocessor post-open hooks at 1733153055781 (+3 ms)Region opened successfully at 1733153055781 2024-12-02T15:24:15,782 INFO [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportWithChecksum,,1733153055421.6625c05dd9746ea58549adddbe4c0b35., pid=217, masterSystemTime=1733153055765 2024-12-02T15:24:15,782 INFO [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportWithChecksum,1,1733153055421.dfad3ba1f868d6526e2daed67068d47a., pid=216, masterSystemTime=1733153055764 2024-12-02T15:24:15,783 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportWithChecksum,1,1733153055421.dfad3ba1f868d6526e2daed67068d47a. 2024-12-02T15:24:15,783 INFO [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=216}] handler.AssignRegionHandler(153): Opened testtb-testExportWithChecksum,1,1733153055421.dfad3ba1f868d6526e2daed67068d47a. 
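A note on the desiredMaxFileSize values printed in the two 'Opened ...' entries above: they are consistent with a 64 MB (67108864-byte) region max file size perturbed by the printed jitterRate, i.e. desiredMaxFileSize ≈ 67108864 * (1 + jitterRate). For dfad3ba1f868d6526e2daed67068d47a: 67108864 * (1 - 0.03435911) ≈ 64803063; for 6625c05dd9746ea58549adddbe4c0b35: 67108864 * (1 + 0.00260766) ≈ 67283861. (The 64 MB base is inferred from these numbers; it is not stated explicitly in the log.)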
2024-12-02T15:24:15,784 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=215 updating hbase:meta row=dfad3ba1f868d6526e2daed67068d47a, regionState=OPEN, openSeqNum=2, regionLocation=be0458f36726,43025,1733152773677 2024-12-02T15:24:15,784 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportWithChecksum,,1733153055421.6625c05dd9746ea58549adddbe4c0b35. 2024-12-02T15:24:15,784 INFO [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=217}] handler.AssignRegionHandler(153): Opened testtb-testExportWithChecksum,,1733153055421.6625c05dd9746ea58549adddbe4c0b35. 2024-12-02T15:24:15,784 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=214 updating hbase:meta row=6625c05dd9746ea58549adddbe4c0b35, regionState=OPEN, openSeqNum=2, regionLocation=be0458f36726,35155,1733152773848 2024-12-02T15:24:15,785 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=216, ppid=215, state=RUNNABLE, hasLock=false; OpenRegionProcedure dfad3ba1f868d6526e2daed67068d47a, server=be0458f36726,43025,1733152773677 because future has completed 2024-12-02T15:24:15,786 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=217, ppid=214, state=RUNNABLE, hasLock=false; OpenRegionProcedure 6625c05dd9746ea58549adddbe4c0b35, server=be0458f36726,35155,1733152773848 because future has completed 2024-12-02T15:24:15,788 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=216, resume processing ppid=215 2024-12-02T15:24:15,788 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=216, ppid=215, state=SUCCESS, hasLock=false; OpenRegionProcedure dfad3ba1f868d6526e2daed67068d47a, server=be0458f36726,43025,1733152773677 in 176 msec 2024-12-02T15:24:15,788 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=217, resume processing ppid=214 2024-12-02T15:24:15,788 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=217, ppid=214, state=SUCCESS, hasLock=false; OpenRegionProcedure 6625c05dd9746ea58549adddbe4c0b35, server=be0458f36726,35155,1733152773848 in 174 msec 2024-12-02T15:24:15,789 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=215, ppid=213, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=dfad3ba1f868d6526e2daed67068d47a, ASSIGN in 336 msec 2024-12-02T15:24:15,790 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=214, resume processing ppid=213 2024-12-02T15:24:15,790 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=214, ppid=213, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=6625c05dd9746ea58549adddbe4c0b35, ASSIGN in 336 msec 2024-12-02T15:24:15,790 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=213, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportWithChecksum execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-02T15:24:15,790 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithChecksum","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733153055790"}]},"ts":"1733153055790"} 2024-12-02T15:24:15,791 INFO 
[PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithChecksum, state=ENABLED in hbase:meta 2024-12-02T15:24:15,792 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=213, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportWithChecksum execute state=CREATE_TABLE_POST_OPERATION 2024-12-02T15:24:15,792 DEBUG [PEWorker-4 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportWithChecksum jenkins: RWXCA 2024-12-02T15:24:15,795 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43025 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithChecksum], kv [jenkins: RWXCA] 2024-12-02T15:24:15,830 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43025-0x10197ce54a90001, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T15:24:15,830 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34441-0x10197ce54a90000, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T15:24:15,830 DEBUG [pool-69-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45405-0x10197ce54a90002, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T15:24:15,830 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35155-0x10197ce54a90003, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T15:24:15,840 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-02T15:24:15,840 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-02T15:24:15,840 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-02T15:24:15,840 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-02T15:24:15,840 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithChecksum \x00 \x01 \x02 \x03 \x04 2024-12-02T15:24:15,840 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithChecksum \x00 \x01 \x02 \x03 \x04 2024-12-02T15:24:15,840 DEBUG [zk-permission-watcher-pool-0 {}] 
access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithChecksum \x00 \x01 \x02 \x03 \x04 2024-12-02T15:24:15,840 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithChecksum \x00 \x01 \x02 \x03 \x04 2024-12-02T15:24:15,842 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=213, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportWithChecksum in 417 msec 2024-12-02T15:24:16,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=213 2024-12-02T15:24:16,055 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportWithChecksum completed 2024-12-02T15:24:16,055 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table testtb-testExportWithChecksum get assigned. Timeout = 60000ms 2024-12-02T15:24:16,056 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-02T15:24:16,064 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45405 {}] regionserver.StoreScanner(1138): Switch to stream read (scanned=32829 bytes) of info 2024-12-02T15:24:16,066 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table testtb-testExportWithChecksum assigned to meta. Checking AM states. 2024-12-02T15:24:16,067 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-02T15:24:16,067 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table testtb-testExportWithChecksum assigned. 2024-12-02T15:24:16,067 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithChecksum,, stopping at row=testtb-testExportWithChecksum ,, for max=2147483647 with caching=100 2024-12-02T15:24:16,070 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } 2024-12-02T15:24:16,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733153056070 (current time:1733153056070). 
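The snapshot request above ({ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 }) is the server-side view of a client Admin snapshot call. A minimal, hypothetical client sketch follows; the Admin handle is assumed, and SnapshotType.FLUSH corresponds to the type=FLUSH shown in the request:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.SnapshotDescription;
import org.apache.hadoop.hbase.client.SnapshotType;

final class TakeSnapshotSketch {
  // Takes a flush-type snapshot of the freshly created (still empty) table.
  static void takeEmptySnapshot(Admin admin) throws Exception {
    admin.snapshot(new SnapshotDescription(
        "emptySnaptb0-testExportWithChecksum",
        TableName.valueOf("testtb-testExportWithChecksum"),
        SnapshotType.FLUSH));
  }
}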
2024-12-02T15:24:16,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-02T15:24:16,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testExportWithChecksum VERSION not specified, setting to 2 2024-12-02T15:24:16,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-02T15:24:16,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@484f3af5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T15:24:16,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] client.ClusterIdFetcher(90): Going to request be0458f36726,34441,-1 for getting cluster id 2024-12-02T15:24:16,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-02T15:24:16,071 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '8e50677f-f1c7-447a-8806-ffbc6b162076' 2024-12-02T15:24:16,072 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-02T15:24:16,072 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "8e50677f-f1c7-447a-8806-ffbc6b162076" 2024-12-02T15:24:16,072 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@35ab9f8f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T15:24:16,072 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [be0458f36726,34441,-1] 2024-12-02T15:24:16,072 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-02T15:24:16,072 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T15:24:16,073 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58090, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-02T15:24:16,074 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@15843591, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T15:24:16,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-02T15:24:16,075 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 
{}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=be0458f36726,45405,1733152773787, seqNum=-1] 2024-12-02T15:24:16,075 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T15:24:16,075 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38220, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T15:24:16,076 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441. 2024-12-02T15:24:16,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-02T15:24:16,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T15:24:16,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T15:24:16,077 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-12-02T15:24:16,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1587eeed, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T15:24:16,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] client.ClusterIdFetcher(90): Going to request be0458f36726,34441,-1 for getting cluster id 2024-12-02T15:24:16,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-02T15:24:16,078 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '8e50677f-f1c7-447a-8806-ffbc6b162076' 2024-12-02T15:24:16,078 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-02T15:24:16,078 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "8e50677f-f1c7-447a-8806-ffbc6b162076" 2024-12-02T15:24:16,078 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3673bba9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T15:24:16,078 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [be0458f36726,34441,-1] 2024-12-02T15:24:16,078 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-02T15:24:16,078 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T15:24:16,079 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58098, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-02T15:24:16,080 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@24634329, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T15:24:16,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-02T15:24:16,081 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=be0458f36726,45405,1733152773787, seqNum=-1] 2024-12-02T15:24:16,081 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T15:24:16,082 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38224, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
2024-12-02T15:24:16,084 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportWithChecksum', locateType=CURRENT is [region=hbase:acl,,1733152776657.82c95700d9ff2efea2507bd84223c4b0., hostname=be0458f36726,43025,1733152773677, seqNum=2] 2024-12-02T15:24:16,084 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T15:24:16,085 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53568, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T15:24:16,086 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441. 2024-12-02T15:24:16,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor255.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-02T15:24:16,086 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T15:24:16,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T15:24:16,087 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-02T15:24:16,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithChecksum], kv [jenkins: RWXCA] 2024-12-02T15:24:16,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-12-02T15:24:16,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] procedure2.ProcedureExecutor(1139): Stored pid=218, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=218, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } 2024-12-02T15:24:16,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 }, snapshot procedure id = 218 2024-12-02T15:24:16,089 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=218, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=218, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-02T15:24:16,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=218 2024-12-02T15:24:16,090 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=218, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=218, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-02T15:24:16,092 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=218, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=218, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-02T15:24:16,098 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742282_1458 (size=161) 2024-12-02T15:24:16,098 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742282_1458 (size=161) 2024-12-02T15:24:16,099 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742282_1458 (size=161) 2024-12-02T15:24:16,100 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=218, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=218, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-02T15:24:16,100 INFO [PEWorker-1 
{}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=219, ppid=218, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 6625c05dd9746ea58549adddbe4c0b35}, {pid=220, ppid=218, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure dfad3ba1f868d6526e2daed67068d47a}] 2024-12-02T15:24:16,103 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=220, ppid=218, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure dfad3ba1f868d6526e2daed67068d47a 2024-12-02T15:24:16,103 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=219, ppid=218, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 6625c05dd9746ea58549adddbe4c0b35 2024-12-02T15:24:16,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=218 2024-12-02T15:24:16,256 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43025 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=220 2024-12-02T15:24:16,256 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35155 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=219 2024-12-02T15:24:16,257 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=220}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithChecksum,1,1733153055421.dfad3ba1f868d6526e2daed67068d47a. 2024-12-02T15:24:16,257 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=219}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithChecksum,,1733153055421.6625c05dd9746ea58549adddbe4c0b35. 2024-12-02T15:24:16,258 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=220}] regionserver.HRegion(2603): Flush status journal for dfad3ba1f868d6526e2daed67068d47a: 2024-12-02T15:24:16,258 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=219}] regionserver.HRegion(2603): Flush status journal for 6625c05dd9746ea58549adddbe4c0b35: 2024-12-02T15:24:16,258 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=220}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithChecksum,1,1733153055421.dfad3ba1f868d6526e2daed67068d47a. for emptySnaptb0-testExportWithChecksum completed. 2024-12-02T15:24:16,258 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=219}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithChecksum,,1733153055421.6625c05dd9746ea58549adddbe4c0b35. for emptySnaptb0-testExportWithChecksum completed. 2024-12-02T15:24:16,258 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=219}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithChecksum,,1733153055421.6625c05dd9746ea58549adddbe4c0b35.' region-info for snapshot=emptySnaptb0-testExportWithChecksum 2024-12-02T15:24:16,258 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=220}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithChecksum,1,1733153055421.dfad3ba1f868d6526e2daed67068d47a.' 
region-info for snapshot=emptySnaptb0-testExportWithChecksum 2024-12-02T15:24:16,258 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=219}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-02T15:24:16,258 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=220}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-02T15:24:16,259 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=219}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-02T15:24:16,259 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=220}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-02T15:24:16,265 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742283_1459 (size=68) 2024-12-02T15:24:16,266 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742284_1460 (size=68) 2024-12-02T15:24:16,266 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742283_1459 (size=68) 2024-12-02T15:24:16,266 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742284_1460 (size=68) 2024-12-02T15:24:16,266 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742284_1460 (size=68) 2024-12-02T15:24:16,266 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742283_1459 (size=68) 2024-12-02T15:24:16,266 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=220}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithChecksum,1,1733153055421.dfad3ba1f868d6526e2daed67068d47a. 2024-12-02T15:24:16,266 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=219}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithChecksum,,1733153055421.6625c05dd9746ea58549adddbe4c0b35. 
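The entries above walk the master-side SnapshotProcedure (pid=218) for the empty FLUSH snapshot: prepare, write the snapshot info, dispatch a SnapshotRegionProcedure per region, store each region's region-info, and record an (empty) hfile reference list. As a rough client-side sketch only (the test's own helper methods are not visible in this log; configuration and setup below are assumptions), the same snapshot could be requested synchronously through the public Admin API:

    // Sketch: request a FLUSH snapshot of the test table via the Admin API.
    // Table and snapshot names mirror the log; everything else is assumed setup.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class SnapshotSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Blocks until the master's SnapshotProcedure (the pid=218 state machine
          // logged above) reports completion.
          admin.snapshot("emptySnaptb0-testExportWithChecksum",
              TableName.valueOf("testtb-testExportWithChecksum"));
        }
      }
    }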
2024-12-02T15:24:16,266 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=220}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=220 2024-12-02T15:24:16,267 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=219}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=219 2024-12-02T15:24:16,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] master.HMaster(4169): Remote procedure done, pid=219 2024-12-02T15:24:16,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.HMaster(4169): Remote procedure done, pid=220 2024-12-02T15:24:16,267 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportWithChecksum on region 6625c05dd9746ea58549adddbe4c0b35 2024-12-02T15:24:16,267 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportWithChecksum on region dfad3ba1f868d6526e2daed67068d47a 2024-12-02T15:24:16,267 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=219, ppid=218, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 6625c05dd9746ea58549adddbe4c0b35 2024-12-02T15:24:16,267 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=220, ppid=218, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure dfad3ba1f868d6526e2daed67068d47a 2024-12-02T15:24:16,269 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=219, ppid=218, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 6625c05dd9746ea58549adddbe4c0b35 in 168 msec 2024-12-02T15:24:16,270 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=220, resume processing ppid=218 2024-12-02T15:24:16,270 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=220, ppid=218, state=SUCCESS, hasLock=false; SnapshotRegionProcedure dfad3ba1f868d6526e2daed67068d47a in 168 msec 2024-12-02T15:24:16,270 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=218, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=218, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-02T15:24:16,271 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=218, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=218, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-02T15:24:16,271 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=218, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=218, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-02T15:24:16,271 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportWithChecksum 2024-12-02T15:24:16,272 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifestV1(130): No regions under 
directory:hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/.hbase-snapshot/.tmp/emptySnaptb0-testExportWithChecksum 2024-12-02T15:24:16,279 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742285_1461 (size=543) 2024-12-02T15:24:16,279 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742285_1461 (size=543) 2024-12-02T15:24:16,279 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742285_1461 (size=543) 2024-12-02T15:24:16,281 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=218, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=218, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-02T15:24:16,287 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=218, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=218, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-02T15:24:16,287 DEBUG [PEWorker-1 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/.hbase-snapshot/.tmp/emptySnaptb0-testExportWithChecksum to hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/.hbase-snapshot/emptySnaptb0-testExportWithChecksum 2024-12-02T15:24:16,289 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=218, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=218, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-02T15:24:16,289 DEBUG [PEWorker-1 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 }, snapshot procedure id = 218 2024-12-02T15:24:16,290 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=218, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=218, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } in 202 msec 2024-12-02T15:24:16,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=218 2024-12-02T15:24:16,403 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportWithChecksum completed 2024-12-02T15:24:16,408 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportWithChecksum', row='07b9481f2d57685d9e8a8a5ada04bd050', locateType=CURRENT is [region=testtb-testExportWithChecksum,,1733153055421.6625c05dd9746ea58549adddbe4c0b35., hostname=be0458f36726,35155,1733152773848, seqNum=2] 2024-12-02T15:24:16,410 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched 
location of 'testtb-testExportWithChecksum', row='119eb0c15b4c6ea1b5805ada2598253a1', locateType=CURRENT is [region=testtb-testExportWithChecksum,1,1733153055421.dfad3ba1f868d6526e2daed67068d47a., hostname=be0458f36726,43025,1733152773677, seqNum=2] 2024-12-02T15:24:16,412 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportWithChecksum', row='2edc4d50df1cf072335b8a82614fc91d0', locateType=CURRENT is [region=testtb-testExportWithChecksum,1,1733153055421.dfad3ba1f868d6526e2daed67068d47a., hostname=be0458f36726,43025,1733152773677, seqNum=2] 2024-12-02T15:24:16,413 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportWithChecksum', row='3d95d3c95b49e4023a26db05a8ce43d5c', locateType=CURRENT is [region=testtb-testExportWithChecksum,1,1733153055421.dfad3ba1f868d6526e2daed67068d47a., hostname=be0458f36726,43025,1733152773677, seqNum=2] 2024-12-02T15:24:16,416 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35155 {}] regionserver.HRegion(8528): writing data to region testtb-testExportWithChecksum,,1733153055421.6625c05dd9746ea58549adddbe4c0b35. with WAL disabled. Data may be lost in the event of a crash. 2024-12-02T15:24:16,417 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43025 {}] regionserver.HRegion(8528): writing data to region testtb-testExportWithChecksum,1,1733153055421.dfad3ba1f868d6526e2daed67068d47a. with WAL disabled. Data may be lost in the event of a crash. 2024-12-02T15:24:16,418 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithChecksum,, stopping at row=testtb-testExportWithChecksum ,, for max=2147483647 with caching=100 2024-12-02T15:24:16,420 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportWithChecksum 2024-12-02T15:24:16,421 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportWithChecksum,,1733153055421.6625c05dd9746ea58549adddbe4c0b35. 2024-12-02T15:24:16,421 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-02T15:24:16,422 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithChecksum,, stopping at row=testtb-testExportWithChecksum ,, for max=2147483647 with caching=100 2024-12-02T15:24:16,427 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithChecksum,, stopping at row=testtb-testExportWithChecksum ,, for max=2147483647 with caching=100 2024-12-02T15:24:16,433 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithChecksum,, stopping at row=testtb-testExportWithChecksum ,, for max=2147483647 with caching=100 2024-12-02T15:24:16,435 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } 2024-12-02T15:24:16,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733153056435 (current time:1733153056435). 
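The HRegion(8528) warnings here appear because the test loads rows with the WAL skipped; a put issued with Durability.SKIP_WAL produces exactly this "Data may be lost in the event of a crash" message. A minimal sketch (connection setup and the cell value are assumptions, while the row key and cf:q qualifier match the flush entries later in the log):

    // Sketch: write a row to the test table with the WAL disabled for this mutation.
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Durability;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class SkipWalPutSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Table table = conn.getTable(TableName.valueOf("testtb-testExportWithChecksum"))) {
          Put put = new Put(Bytes.toBytes("0ea141348f7be1cbfe62061e8366d351"));
          put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value"));
          put.setDurability(Durability.SKIP_WAL); // triggers the "WAL disabled" warning above
          table.put(put);
        }
      }
    }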
2024-12-02T15:24:16,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-02T15:24:16,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportWithChecksum VERSION not specified, setting to 2 2024-12-02T15:24:16,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-02T15:24:16,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4f3f6bb0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T15:24:16,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] client.ClusterIdFetcher(90): Going to request be0458f36726,34441,-1 for getting cluster id 2024-12-02T15:24:16,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-02T15:24:16,436 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '8e50677f-f1c7-447a-8806-ffbc6b162076' 2024-12-02T15:24:16,436 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-02T15:24:16,436 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "8e50677f-f1c7-447a-8806-ffbc6b162076" 2024-12-02T15:24:16,437 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2c7f2e46, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T15:24:16,437 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [be0458f36726,34441,-1] 2024-12-02T15:24:16,437 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-02T15:24:16,437 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T15:24:16,438 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58112, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-02T15:24:16,438 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@14a1e9b0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T15:24:16,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-02T15:24:16,439 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] 
client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=be0458f36726,45405,1733152773787, seqNum=-1] 2024-12-02T15:24:16,439 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T15:24:16,440 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38236, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T15:24:16,441 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441. 2024-12-02T15:24:16,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-02T15:24:16,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T15:24:16,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T15:24:16,441 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-12-02T15:24:16,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@791220d3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T15:24:16,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] client.ClusterIdFetcher(90): Going to request be0458f36726,34441,-1 for getting cluster id 2024-12-02T15:24:16,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-02T15:24:16,442 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '8e50677f-f1c7-447a-8806-ffbc6b162076' 2024-12-02T15:24:16,442 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-02T15:24:16,442 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "8e50677f-f1c7-447a-8806-ffbc6b162076" 2024-12-02T15:24:16,443 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@49668247, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T15:24:16,443 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [be0458f36726,34441,-1] 2024-12-02T15:24:16,443 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-02T15:24:16,443 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T15:24:16,443 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58134, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-02T15:24:16,444 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@389bc479, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T15:24:16,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-02T15:24:16,445 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=be0458f36726,45405,1733152773787, seqNum=-1] 2024-12-02T15:24:16,445 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T15:24:16,446 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38244, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
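The ClusterIdFetcher / ConnectionUtils entries above are the internal steps of bringing up a short-lived connection: fetch the cluster id from the connection registry, resolve the hbase:meta location, then open a ClientService connection. The region-location lookups that AsyncNonMetaRegionLocator logs for 'testtb-testExportWithChecksum' can be done explicitly with a RegionLocator; a small sketch (row key copied from the log, the rest assumed):

    // Sketch: resolve which region and server host a given row of the test table.
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.RegionLocator;
    import org.apache.hadoop.hbase.util.Bytes;

    public class LocateRegionSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             RegionLocator locator =
                 conn.getRegionLocator(TableName.valueOf("testtb-testExportWithChecksum"))) {
          HRegionLocation loc =
              locator.getRegionLocation(Bytes.toBytes("119eb0c15b4c6ea1b5805ada2598253a1"));
          // Prints the region name and hosting server, analogous to the locator log lines.
          System.out.println(loc.getRegion().getRegionNameAsString()
              + " on " + loc.getServerName());
        }
      }
    }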
2024-12-02T15:24:16,447 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportWithChecksum', locateType=CURRENT is [region=hbase:acl,,1733152776657.82c95700d9ff2efea2507bd84223c4b0., hostname=be0458f36726,43025,1733152773677, seqNum=2] 2024-12-02T15:24:16,448 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T15:24:16,449 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53570, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T15:24:16,449 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441. 2024-12-02T15:24:16,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor255.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-02T15:24:16,449 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T15:24:16,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T15:24:16,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithChecksum], kv [jenkins: RWXCA] 2024-12-02T15:24:16,450 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-02T15:24:16,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-12-02T15:24:16,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] procedure2.ProcedureExecutor(1139): Stored pid=221, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=221, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } 2024-12-02T15:24:16,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 }, snapshot procedure id = 221 2024-12-02T15:24:16,452 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=221, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=221, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-02T15:24:16,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=221 2024-12-02T15:24:16,452 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=221, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=221, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-02T15:24:16,454 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=221, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=221, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-02T15:24:16,459 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742286_1462 (size=156) 2024-12-02T15:24:16,459 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742286_1462 (size=156) 2024-12-02T15:24:16,459 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742286_1462 (size=156) 2024-12-02T15:24:16,460 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=221, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=221, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-02T15:24:16,460 INFO [PEWorker-3 {}] 
procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=222, ppid=221, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 6625c05dd9746ea58549adddbe4c0b35}, {pid=223, ppid=221, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure dfad3ba1f868d6526e2daed67068d47a}] 2024-12-02T15:24:16,461 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=222, ppid=221, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 6625c05dd9746ea58549adddbe4c0b35 2024-12-02T15:24:16,461 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=223, ppid=221, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure dfad3ba1f868d6526e2daed67068d47a 2024-12-02T15:24:16,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=221 2024-12-02T15:24:16,614 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43025 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=223 2024-12-02T15:24:16,614 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35155 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=222 2024-12-02T15:24:16,614 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithChecksum,1,1733153055421.dfad3ba1f868d6526e2daed67068d47a. 2024-12-02T15:24:16,614 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithChecksum,,1733153055421.6625c05dd9746ea58549adddbe4c0b35. 
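While the pid=221 snapshot runs, MasterRpcServices(1377) repeatedly answers "Checking to see if procedure is done" probes from the requesting client. The user-facing equivalent of that polling is Admin.isSnapshotFinished; a rough sketch under the assumption that the snapshot was requested elsewhere (snapshot name copied from the log, setup assumed):

    // Sketch: poll the master until the snapshot procedure has finished.
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.SnapshotDescription;

    public class WaitForSnapshotSketch {
      public static void main(String[] args) throws Exception {
        SnapshotDescription snapshot = new SnapshotDescription("snaptb0-testExportWithChecksum");
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          while (!admin.isSnapshotFinished(snapshot)) {
            Thread.sleep(100); // mirrors the periodic "is procedure done" checks in the log
          }
        }
      }
    }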
2024-12-02T15:24:16,615 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] regionserver.HRegion(2902): Flushing 6625c05dd9746ea58549adddbe4c0b35 1/1 column families, dataSize=199 B heapSize=688 B 2024-12-02T15:24:16,615 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] regionserver.HRegion(2902): Flushing dfad3ba1f868d6526e2daed67068d47a 1/1 column families, dataSize=3.06 KB heapSize=6.86 KB 2024-12-02T15:24:16,635 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportWithChecksum/dfad3ba1f868d6526e2daed67068d47a/.tmp/cf/45d9014e442348c3965eedbaaf355bd7 is 71, key is 10c3a52a0a66cb0e9a6b4d8ab8a7dd22/cf:q/1733153056417/Put/seqid=0 2024-12-02T15:24:16,635 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportWithChecksum/6625c05dd9746ea58549adddbe4c0b35/.tmp/cf/15bf3ae4f81e41408e106cc9568bb5a8 is 71, key is 0ea141348f7be1cbfe62061e8366d351/cf:q/1733153056416/Put/seqid=0 2024-12-02T15:24:16,640 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742287_1463 (size=5288) 2024-12-02T15:24:16,640 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742288_1464 (size=8324) 2024-12-02T15:24:16,640 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742288_1464 (size=8324) 2024-12-02T15:24:16,640 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742287_1463 (size=5288) 2024-12-02T15:24:16,641 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742288_1464 (size=8324) 2024-12-02T15:24:16,641 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742287_1463 (size=5288) 2024-12-02T15:24:16,641 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.06 KB at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportWithChecksum/dfad3ba1f868d6526e2daed67068d47a/.tmp/cf/45d9014e442348c3965eedbaaf355bd7 2024-12-02T15:24:16,641 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=199 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportWithChecksum/6625c05dd9746ea58549adddbe4c0b35/.tmp/cf/15bf3ae4f81e41408e106cc9568bb5a8 2024-12-02T15:24:16,645 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportWithChecksum/6625c05dd9746ea58549adddbe4c0b35/.tmp/cf/15bf3ae4f81e41408e106cc9568bb5a8 as hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportWithChecksum/6625c05dd9746ea58549adddbe4c0b35/cf/15bf3ae4f81e41408e106cc9568bb5a8 2024-12-02T15:24:16,645 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportWithChecksum/dfad3ba1f868d6526e2daed67068d47a/.tmp/cf/45d9014e442348c3965eedbaaf355bd7 as hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportWithChecksum/dfad3ba1f868d6526e2daed67068d47a/cf/45d9014e442348c3965eedbaaf355bd7 2024-12-02T15:24:16,649 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportWithChecksum/dfad3ba1f868d6526e2daed67068d47a/cf/45d9014e442348c3965eedbaaf355bd7, entries=47, sequenceid=6, filesize=8.1 K 2024-12-02T15:24:16,649 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportWithChecksum/6625c05dd9746ea58549adddbe4c0b35/cf/15bf3ae4f81e41408e106cc9568bb5a8, entries=3, sequenceid=6, filesize=5.2 K 2024-12-02T15:24:16,650 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] regionserver.HRegion(3140): Finished flush of dataSize ~199 B/199, heapSize ~672 B/672, currentSize=0 B/0 for 6625c05dd9746ea58549adddbe4c0b35 in 35ms, sequenceid=6, compaction requested=false 2024-12-02T15:24:16,650 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] regionserver.HRegion(3140): Finished flush of dataSize ~3.06 KB/3137, heapSize ~6.84 KB/7008, currentSize=0 B/0 for dfad3ba1f868d6526e2daed67068d47a in 36ms, sequenceid=6, compaction requested=false 2024-12-02T15:24:16,650 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportWithChecksum' 2024-12-02T15:24:16,650 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportWithChecksum' 2024-12-02T15:24:16,651 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] regionserver.HRegion(2603): Flush status journal for 6625c05dd9746ea58549adddbe4c0b35: 2024-12-02T15:24:16,651 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] regionserver.HRegion(2603): Flush status journal for dfad3ba1f868d6526e2daed67068d47a: 2024-12-02T15:24:16,651 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] regionserver.SnapshotRegionCallable(75): Snapshotting region 
testtb-testExportWithChecksum,,1733153055421.6625c05dd9746ea58549adddbe4c0b35. for snaptb0-testExportWithChecksum completed. 2024-12-02T15:24:16,651 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithChecksum,1,1733153055421.dfad3ba1f868d6526e2daed67068d47a. for snaptb0-testExportWithChecksum completed. 2024-12-02T15:24:16,651 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithChecksum,,1733153055421.6625c05dd9746ea58549adddbe4c0b35.' region-info for snapshot=snaptb0-testExportWithChecksum 2024-12-02T15:24:16,651 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithChecksum,1,1733153055421.dfad3ba1f868d6526e2daed67068d47a.' region-info for snapshot=snaptb0-testExportWithChecksum 2024-12-02T15:24:16,651 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-02T15:24:16,651 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-02T15:24:16,651 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportWithChecksum/dfad3ba1f868d6526e2daed67068d47a/cf/45d9014e442348c3965eedbaaf355bd7] hfiles 2024-12-02T15:24:16,651 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportWithChecksum/6625c05dd9746ea58549adddbe4c0b35/cf/15bf3ae4f81e41408e106cc9568bb5a8] hfiles 2024-12-02T15:24:16,651 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportWithChecksum/6625c05dd9746ea58549adddbe4c0b35/cf/15bf3ae4f81e41408e106cc9568bb5a8 for snapshot=snaptb0-testExportWithChecksum 2024-12-02T15:24:16,651 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportWithChecksum/dfad3ba1f868d6526e2daed67068d47a/cf/45d9014e442348c3965eedbaaf355bd7 for snapshot=snaptb0-testExportWithChecksum 2024-12-02T15:24:16,660 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742290_1466 (size=107) 2024-12-02T15:24:16,660 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742290_1466 (size=107) 2024-12-02T15:24:16,660 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:45623 is added to blk_1073742290_1466 (size=107) 2024-12-02T15:24:16,660 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithChecksum,1,1733153055421.dfad3ba1f868d6526e2daed67068d47a. 2024-12-02T15:24:16,660 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=223 2024-12-02T15:24:16,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.HMaster(4169): Remote procedure done, pid=223 2024-12-02T15:24:16,661 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportWithChecksum on region dfad3ba1f868d6526e2daed67068d47a 2024-12-02T15:24:16,661 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=223, ppid=221, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure dfad3ba1f868d6526e2daed67068d47a 2024-12-02T15:24:16,663 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=223, ppid=221, state=SUCCESS, hasLock=false; SnapshotRegionProcedure dfad3ba1f868d6526e2daed67068d47a in 202 msec 2024-12-02T15:24:16,663 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742289_1465 (size=107) 2024-12-02T15:24:16,664 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742289_1465 (size=107) 2024-12-02T15:24:16,664 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742289_1465 (size=107) 2024-12-02T15:24:16,664 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithChecksum,,1733153055421.6625c05dd9746ea58549adddbe4c0b35. 
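Because the snapshot type is FLUSH, each SnapshotRegionCallable first flushes the region's memstore (HRegion(2902)), commits the resulting hfile from .tmp into the cf/ directory, and only then records it as a reference in the snapshot manifest. Outside of a snapshot, the same flush can be forced through the Admin API; a short sketch with assumed connection setup:

    // Sketch: force a memstore flush of the test table, as the FLUSH snapshot does per region.
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTableSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          admin.flush(TableName.valueOf("testtb-testExportWithChecksum"));
        }
      }
    }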
2024-12-02T15:24:16,664 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=222 2024-12-02T15:24:16,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.HMaster(4169): Remote procedure done, pid=222 2024-12-02T15:24:16,665 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportWithChecksum on region 6625c05dd9746ea58549adddbe4c0b35 2024-12-02T15:24:16,665 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=222, ppid=221, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 6625c05dd9746ea58549adddbe4c0b35 2024-12-02T15:24:16,667 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=222, resume processing ppid=221 2024-12-02T15:24:16,667 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=222, ppid=221, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 6625c05dd9746ea58549adddbe4c0b35 in 205 msec 2024-12-02T15:24:16,667 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=221, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=221, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-02T15:24:16,668 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=221, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=221, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-02T15:24:16,668 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=221, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=221, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-02T15:24:16,668 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportWithChecksum 2024-12-02T15:24:16,669 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/.hbase-snapshot/.tmp/snaptb0-testExportWithChecksum 2024-12-02T15:24:16,675 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742291_1467 (size=621) 2024-12-02T15:24:16,676 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742291_1467 (size=621) 2024-12-02T15:24:16,676 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742291_1467 (size=621) 2024-12-02T15:24:16,677 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=221, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=221, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-02T15:24:16,681 INFO [PEWorker-4 {}] 
procedure.SnapshotProcedure(134): pid=221, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=221, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-02T15:24:16,682 DEBUG [PEWorker-4 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/.hbase-snapshot/.tmp/snaptb0-testExportWithChecksum to hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/.hbase-snapshot/snaptb0-testExportWithChecksum 2024-12-02T15:24:16,683 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=221, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=221, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-02T15:24:16,683 DEBUG [PEWorker-4 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 }, snapshot procedure id = 221 2024-12-02T15:24:16,684 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=221, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=221, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } in 233 msec 2024-12-02T15:24:16,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=221 2024-12-02T15:24:16,774 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportWithChecksum completed 2024-12-02T15:24:16,774 INFO [Time-limited test {}] snapshot.TestExportSnapshot(523): Local export destination path: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/44e720fa-d04f-d823-6564-55fab94a4e60/local-export-1733153056774 2024-12-02T15:24:16,775 INFO [Time-limited test {}] snapshot.TestExportSnapshot(542): tgtFsUri=file:///, tgtDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/44e720fa-d04f-d823-6564-55fab94a4e60/local-export-1733153056774, rawTgtDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/44e720fa-d04f-d823-6564-55fab94a4e60/local-export-1733153056774, srcFsUri=hdfs://localhost:36329, srcDir=hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657 2024-12-02T15:24:16,806 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1094): inputFs=hdfs://localhost:36329, inputRoot=hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657 2024-12-02T15:24:16,806 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1095): outputFs=org.apache.hadoop.fs.LocalFileSystem@53ba3ed2, outputRoot=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/44e720fa-d04f-d823-6564-55fab94a4e60/local-export-1733153056774, skipTmp=false, 
initialOutputSnapshotDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/44e720fa-d04f-d823-6564-55fab94a4e60/local-export-1733153056774/.hbase-snapshot/.tmp/snaptb0-testExportWithChecksum 2024-12-02T15:24:16,807 INFO [Time-limited test {}] snapshot.ExportSnapshot(1104): Verify the source snapshot's expiration status and integrity. 2024-12-02T15:24:16,811 INFO [Time-limited test {}] snapshot.ExportSnapshot(1162): Copy Snapshot Manifest from hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/.hbase-snapshot/snaptb0-testExportWithChecksum to file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/44e720fa-d04f-d823-6564-55fab94a4e60/local-export-1733153056774/.hbase-snapshot/.tmp/snaptb0-testExportWithChecksum 2024-12-02T15:24:16,835 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-common/target/hbase-common-3.0.0-beta-2-SNAPSHOT.jar 2024-12-02T15:24:16,835 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-protocol-shaded/target/hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar 2024-12-02T15:24:16,836 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-client/target/hbase-client-3.0.0-beta-2-SNAPSHOT.jar 2024-12-02T15:24:17,606 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/44e720fa-d04f-d823-6564-55fab94a4e60/hadoop-8820256632082515071.jar 2024-12-02T15:24:17,606 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-hadoop-compat/target/hbase-hadoop-compat-3.0.0-beta-2-SNAPSHOT.jar 2024-12-02T15:24:17,607 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-hadoop-compat/target/hbase-hadoop-compat-3.0.0-beta-2-SNAPSHOT.jar 2024-12-02T15:24:17,658 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/44e720fa-d04f-d823-6564-55fab94a4e60/hadoop-14811027968464648507.jar 2024-12-02T15:24:17,658 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-metrics/target/hbase-metrics-3.0.0-beta-2-SNAPSHOT.jar 2024-12-02T15:24:17,658 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-metrics-api/target/hbase-metrics-api-3.0.0-beta-2-SNAPSHOT.jar 2024-12-02T15:24:17,658 DEBUG [Time-limited test {}] 
mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-replication/target/hbase-replication-3.0.0-beta-2-SNAPSHOT.jar 2024-12-02T15:24:17,659 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-http/target/hbase-http-3.0.0-beta-2-SNAPSHOT.jar 2024-12-02T15:24:17,659 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-procedure/target/hbase-procedure-3.0.0-beta-2-SNAPSHOT.jar 2024-12-02T15:24:17,659 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-zookeeper/target/hbase-zookeeper-3.0.0-beta-2-SNAPSHOT.jar 2024-12-02T15:24:17,659 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-02T15:24:17,659 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-02T15:24:17,660 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-02T15:24:17,660 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-02T15:24:17,660 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-02T15:24:17,660 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-02T15:24:17,660 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-02T15:24:17,661 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-02T15:24:17,661 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-02T15:24:17,661 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-02T15:24:17,661 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-02T15:24:17,661 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-02T15:24:17,661 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-02T15:24:17,662 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-02T15:24:17,662 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-02T15:24:17,662 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-02T15:24:17,662 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-02T15:24:17,662 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-02T15:24:17,705 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742292_1468 (size=24020) 2024-12-02T15:24:17,705 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to 
blk_1073742292_1468 (size=24020) 2024-12-02T15:24:17,705 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742292_1468 (size=24020) 2024-12-02T15:24:17,712 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742293_1469 (size=77755) 2024-12-02T15:24:17,712 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742293_1469 (size=77755) 2024-12-02T15:24:17,712 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742293_1469 (size=77755) 2024-12-02T15:24:17,719 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742294_1470 (size=131360) 2024-12-02T15:24:17,719 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742294_1470 (size=131360) 2024-12-02T15:24:17,720 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742294_1470 (size=131360) 2024-12-02T15:24:17,726 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742295_1471 (size=111793) 2024-12-02T15:24:17,726 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742295_1471 (size=111793) 2024-12-02T15:24:17,726 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742295_1471 (size=111793) 2024-12-02T15:24:17,735 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742296_1472 (size=1832290) 2024-12-02T15:24:17,736 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742296_1472 (size=1832290) 2024-12-02T15:24:17,736 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742296_1472 (size=1832290) 2024-12-02T15:24:17,759 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742297_1473 (size=6424745) 2024-12-02T15:24:17,759 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742297_1473 (size=6424745) 2024-12-02T15:24:17,760 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742297_1473 (size=6424745) 2024-12-02T15:24:17,780 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742298_1474 (size=8360005) 2024-12-02T15:24:17,780 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742298_1474 (size=8360005) 2024-12-02T15:24:17,781 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742298_1474 (size=8360005) 2024-12-02T15:24:17,788 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is 
added to blk_1073742299_1475 (size=443171) 2024-12-02T15:24:17,788 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742299_1475 (size=443171) 2024-12-02T15:24:17,788 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742299_1475 (size=443171) 2024-12-02T15:24:17,794 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742300_1476 (size=503880) 2024-12-02T15:24:17,794 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742300_1476 (size=503880) 2024-12-02T15:24:17,794 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742300_1476 (size=503880) 2024-12-02T15:24:17,804 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742301_1477 (size=322274) 2024-12-02T15:24:17,804 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742301_1477 (size=322274) 2024-12-02T15:24:17,804 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742301_1477 (size=322274) 2024-12-02T15:24:17,812 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742302_1478 (size=20406) 2024-12-02T15:24:17,812 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742302_1478 (size=20406) 2024-12-02T15:24:17,812 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742302_1478 (size=20406) 2024-12-02T15:24:17,820 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742303_1479 (size=45609) 2024-12-02T15:24:17,820 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742303_1479 (size=45609) 2024-12-02T15:24:17,821 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742303_1479 (size=45609) 2024-12-02T15:24:17,829 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742304_1480 (size=136454) 2024-12-02T15:24:17,829 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742304_1480 (size=136454) 2024-12-02T15:24:17,830 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742304_1480 (size=136454) 2024-12-02T15:24:17,843 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742305_1481 (size=1597136) 2024-12-02T15:24:17,844 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742305_1481 (size=1597136) 2024-12-02T15:24:17,844 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 
is added to blk_1073742305_1481 (size=1597136) 2024-12-02T15:24:17,850 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742306_1482 (size=30873) 2024-12-02T15:24:17,850 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742306_1482 (size=30873) 2024-12-02T15:24:17,851 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742306_1482 (size=30873) 2024-12-02T15:24:17,857 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742307_1483 (size=29229) 2024-12-02T15:24:17,857 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742307_1483 (size=29229) 2024-12-02T15:24:17,857 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742307_1483 (size=29229) 2024-12-02T15:24:17,871 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742308_1484 (size=903849) 2024-12-02T15:24:17,874 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742308_1484 (size=903849) 2024-12-02T15:24:17,874 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742308_1484 (size=903849) 2024-12-02T15:24:17,897 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742309_1485 (size=5175431) 2024-12-02T15:24:17,898 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742309_1485 (size=5175431) 2024-12-02T15:24:17,899 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742309_1485 (size=5175431) 2024-12-02T15:24:17,911 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742310_1486 (size=232881) 2024-12-02T15:24:17,912 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742310_1486 (size=232881) 2024-12-02T15:24:17,912 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742310_1486 (size=232881) 2024-12-02T15:24:17,921 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742311_1487 (size=1323991) 2024-12-02T15:24:17,921 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742311_1487 (size=1323991) 2024-12-02T15:24:17,921 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742311_1487 (size=1323991) 2024-12-02T15:24:17,937 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742312_1488 (size=4695811) 2024-12-02T15:24:17,937 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:45623 is added to blk_1073742312_1488 (size=4695811) 2024-12-02T15:24:17,937 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742312_1488 (size=4695811) 2024-12-02T15:24:17,947 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742313_1489 (size=1877034) 2024-12-02T15:24:17,947 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742313_1489 (size=1877034) 2024-12-02T15:24:17,947 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742313_1489 (size=1877034) 2024-12-02T15:24:17,955 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742314_1490 (size=217555) 2024-12-02T15:24:17,955 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742314_1490 (size=217555) 2024-12-02T15:24:17,956 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742314_1490 (size=217555) 2024-12-02T15:24:17,971 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742315_1491 (size=4188619) 2024-12-02T15:24:17,971 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742315_1491 (size=4188619) 2024-12-02T15:24:17,971 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742315_1491 (size=4188619) 2024-12-02T15:24:17,993 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742316_1492 (size=127628) 2024-12-02T15:24:17,994 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742316_1492 (size=127628) 2024-12-02T15:24:17,994 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742316_1492 (size=127628) 2024-12-02T15:24:17,995 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 
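Editorial note: the mapreduce.TableMapReduceUtil(972) DEBUG lines above show the tool resolving, for each referenced class, the jar that must travel with the MapReduce job, and the JobResourceUploader warning flags that the test itself never set a job jar. Below is a minimal sketch of how a driver normally wires this up; the class name ExportJobSetup and the job name are illustrative assumptions, not taken from the test code.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.mapreduce.Job;

public class ExportJobSetup {
  // Builds a Job whose own jar is set (addressing the Job#setJar(String) hint in the
  // warning) and whose classpath carries the HBase dependency jars resolved above.
  public static Job newJob(Configuration base) throws Exception {
    Configuration conf = HBaseConfiguration.create(base);
    Job job = Job.getInstance(conf, "snapshot-export-example"); // job name is illustrative
    job.setJarByClass(ExportJobSetup.class); // ships the jar containing the driver/mapper classes
    // Adds the jar for each referenced HBase/Hadoop class to the distributed cache;
    // this is the call behind the "For class X, using jar Y" DEBUG lines.
    TableMapReduceUtil.addDependencyJars(job);
    return job;
  }
}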
2024-12-02T15:24:17,998 INFO [Time-limited test {}] snapshot.ExportSnapshot(663): Loading Snapshot 'snaptb0-testExportWithChecksum' hfile list 2024-12-02T15:24:17,999 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=0 size=8.1 K 2024-12-02T15:24:17,999 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=1 size=5.2 K 2024-12-02T15:24:18,006 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742317_1493 (size=441) 2024-12-02T15:24:18,006 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742317_1493 (size=441) 2024-12-02T15:24:18,007 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742317_1493 (size=441) 2024-12-02T15:24:18,012 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742318_1494 (size=21) 2024-12-02T15:24:18,012 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742318_1494 (size=21) 2024-12-02T15:24:18,012 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742318_1494 (size=21) 2024-12-02T15:24:18,024 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742319_1495 (size=304129) 2024-12-02T15:24:18,024 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742319_1495 (size=304129) 2024-12-02T15:24:18,025 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742319_1495 (size=304129) 2024-12-02T15:24:18,597 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-02T15:24:18,986 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-02T15:24:18,986 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-12-02T15:24:18,990 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733152780087_0008_000001 (auth:SIMPLE) from 127.0.0.1:33232 2024-12-02T15:24:19,000 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_126270/yarn-6841854118/MiniMRCluster_126270-localDir-nm-0_1/usercache/jenkins/appcache/application_1733152780087_0008/container_1733152780087_0008_01_000001/launch_container.sh] 2024-12-02T15:24:19,000 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_126270/yarn-6841854118/MiniMRCluster_126270-localDir-nm-0_1/usercache/jenkins/appcache/application_1733152780087_0008/container_1733152780087_0008_01_000001/container_tokens] 2024-12-02T15:24:19,000 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_126270/yarn-6841854118/MiniMRCluster_126270-localDir-nm-0_1/usercache/jenkins/appcache/application_1733152780087_0008/container_1733152780087_0008_01_000001/sysfs] 2024-12-02T15:24:19,784 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733152780087_0009_000001 (auth:SIMPLE) from 127.0.0.1:59448 2024-12-02T15:24:23,093 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithChecksum 2024-12-02T15:24:23,093 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithChecksum Metrics about Tables on a single HBase RegionServer 2024-12-02T15:24:23,094 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testEmptyExportFileSystemState 2024-12-02T15:24:24,656 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733152780087_0009_000001 (auth:SIMPLE) from 127.0.0.1:47794 2024-12-02T15:24:24,854 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742320_1496 (size=349827) 2024-12-02T15:24:24,854 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742320_1496 (size=349827) 2024-12-02T15:24:24,854 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742320_1496 (size=349827) 2024-12-02T15:24:26,994 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733152780087_0009_000001 (auth:SIMPLE) from 127.0.0.1:51740 2024-12-02T15:24:26,995 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733152780087_0009_000001 (auth:SIMPLE) from 127.0.0.1:49792 2024-12-02T15:24:28,596 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: 
tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-02T15:24:31,777 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-02T15:24:33,544 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_126270/yarn-6841854118/MiniMRCluster_126270-localDir-nm-1_0/usercache/jenkins/appcache/application_1733152780087_0009/container_1733152780087_0009_01_000002/launch_container.sh] 2024-12-02T15:24:33,544 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_126270/yarn-6841854118/MiniMRCluster_126270-localDir-nm-1_0/usercache/jenkins/appcache/application_1733152780087_0009/container_1733152780087_0009_01_000002/container_tokens] 2024-12-02T15:24:33,544 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_126270/yarn-6841854118/MiniMRCluster_126270-localDir-nm-1_0/usercache/jenkins/appcache/application_1733152780087_0009/container_1733152780087_0009_01_000002/sysfs] 2024-12-02T15:24:33,757 INFO [LruBlockCacheStatsExecutor {}] hfile.LruBlockCache(1020): totalSize=880 MB, usedSize=920.55 KB, freeSize=879.10 MB, max=880 MB, blockCount=5, accesses=7, hits=2, hitRatio=28.57%, , cachingAccesses=7, cachingHits=2, cachingHitsRatio=28.57%, evictions=29, evicted=0, evictedPerRun=0.0 2024-12-02T15:24:33,816 INFO [LruBlockCacheStatsExecutor {}] hfile.LruBlockCache(1020): totalSize=880 MB, usedSize=660.99 KB, freeSize=879.35 MB, max=880 MB, blockCount=0, accesses=0, hits=0, hitRatio=0, cachingAccesses=0, cachingHits=0, cachingHitsRatio=0,evictions=29, evicted=0, evictedPerRun=0.0 2024-12-02T15:24:33,875 INFO [LruBlockCacheStatsExecutor {}] hfile.LruBlockCache(1020): totalSize=880 MB, usedSize=660.99 KB, freeSize=879.35 MB, max=880 MB, blockCount=0, accesses=0, hits=0, hitRatio=0, cachingAccesses=0, cachingHits=0, cachingHitsRatio=0,evictions=29, evicted=0, evictedPerRun=0.0 2024-12-02T15:24:33,993 DEBUG [master/be0458f36726:0:becomeActiveMaster-MemStoreChunkPool Statistics {}] regionserver.ChunkCreator$MemStoreChunkPool$StatisticsThread(417): data stats (chunk size=2097152): current pool size=2, created chunk count=10, reused chunk count=22, reuseRatio=68.75% 2024-12-02T15:24:33,993 DEBUG [master/be0458f36726:0:becomeActiveMaster-MemStoreChunkPool Statistics {}] regionserver.ChunkCreator$MemStoreChunkPool$StatisticsThread(417): index stats (chunk size=209715): current pool size=0, created chunk count=0, reused chunk count=0, reuseRatio=0 Error: java.io.IOException: Checksum mismatch between hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportWithChecksum/dfad3ba1f868d6526e2daed67068d47a/cf/45d9014e442348c3965eedbaaf355bd7 and 
file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/44e720fa-d04f-d823-6564-55fab94a4e60/local-export-1733153056774/archive/data/default/testtb-testExportWithChecksum/dfad3ba1f868d6526e2daed67068d47a/cf/45d9014e442348c3965eedbaaf355bd7. Input and output filesystems are of different types. Their checksum algorithms may be incompatible. You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. (NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:601) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:337) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:259) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:183) at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145) at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800) at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348) at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172) 2024-12-02T15:24:34,800 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733152780087_0009_000001 (auth:SIMPLE) from 127.0.0.1:54664 2024-12-02T15:24:35,547 INFO [regionserver/be0458f36726:0.Chore.1 {}] regionserver.Replication$ReplicationStatisticsChore(208): Global stats: WAL Edits Buffer Used=0B, Limit=268435456B 2024-12-02T15:24:35,556 INFO [regionserver/be0458f36726:0.Chore.1 {}] regionserver.Replication$ReplicationStatisticsChore(208): Global stats: WAL Edits Buffer Used=0B, Limit=268435456B 2024-12-02T15:24:35,591 INFO [regionserver/be0458f36726:0.Chore.1 {}] regionserver.Replication$ReplicationStatisticsChore(208): Global stats: WAL Edits Buffer Used=0B, Limit=268435456B 2024-12-02T15:24:36,477 INFO [regionserver/be0458f36726:0.Chore.1 {}] regionserver.HRegionServer$PeriodicMemStoreFlusher(1763): MemstoreFlusherChore requesting flush of hbase:meta,,1.1588230740 because 1588230740/ns has an old edit so flush to free WALs after random delay 118270 ms 2024-12-02T15:24:36,501 DEBUG [master/be0458f36726:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-02T15:24:36,502 DEBUG [master/be0458f36726:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region a07dd0e3653e17be2e0e596d280713d4 changed from -1.0 to 0.0, refreshing cache 2024-12-02T15:24:36,502 DEBUG [master/be0458f36726:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 6625c05dd9746ea58549adddbe4c0b35 changed from -1.0 to 0.0, refreshing cache 2024-12-02T15:24:36,502 DEBUG [master/be0458f36726:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region dfad3ba1f868d6526e2daed67068d47a changed from -1.0 to 0.0, refreshing cache 2024-12-02T15:24:36,503 DEBUG [master/be0458f36726:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): 
Locality for region 9d2126893e3de75703fde3d34993d9c3 changed from -1.0 to 0.0, refreshing cache Error: java.io.IOException: Checksum mismatch between hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportWithChecksum/6625c05dd9746ea58549adddbe4c0b35/cf/15bf3ae4f81e41408e106cc9568bb5a8 and file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/44e720fa-d04f-d823-6564-55fab94a4e60/local-export-1733153056774/archive/data/default/testtb-testExportWithChecksum/6625c05dd9746ea58549adddbe4c0b35/cf/15bf3ae4f81e41408e106cc9568bb5a8. Input and output filesystems are of different types. Their checksum algorithms may be incompatible. You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. (NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:601) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:337) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:259) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:183) at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145) at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800) at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348) at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172) 2024-12-02T15:24:37,423 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 9d2126893e3de75703fde3d34993d9c3, had cached 0 bytes from a total of 8256 2024-12-02T15:24:37,424 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region a07dd0e3653e17be2e0e596d280713d4, had cached 0 bytes from a total of 5356 2024-12-02T15:24:37,814 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733152780087_0009_000001 (auth:SIMPLE) from 127.0.0.1:54676 2024-12-02T15:24:38,002 DEBUG [master/be0458f36726:0.Chore.1 {}] rsgroup.RSGroupBasedLoadBalancer(138): Balancing RSGroup=default 2024-12-02T15:24:38,003 INFO [master/be0458f36726:0.Chore.1 {}] rsgroup.RSGroupBasedLoadBalancer(151): Start Generate Balance plan for group: default 2024-12-02T15:24:38,003 DEBUG [master/be0458f36726:0.Chore.1 {}] balancer.BaseLoadBalancer(619): Start Generate Balance plan for cluster. 
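Editorial note: the repeated "Checksum mismatch ... Input and output filesystems are of different types" IOExceptions come from ExportSnapshot's per-file verification when copying from HDFS to a local filesystem, and the message itself names the workarounds. The sketch below re-runs the export through ToolRunner (the same way the test drives it, per the stack frames above) with those options applied; the destination path is an assumption, only the snapshot name is taken from the log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public class ExportWithChecksumOptions {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // File-level checksum comparison, as suggested by the IOException text, for cases
    // where block sizes or filesystem types differ between source and destination.
    conf.set("dfs.checksum.combine.mode", "COMPOSITE_CRC");
    int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
        "-snapshot", "snaptb0-testExportWithChecksum",
        "-copy-to", "file:///tmp/snapshot-export"  // destination is illustrative only
        // add "-no-checksum-verify" to skip the per-file comparison entirely
    });
    System.exit(rc);
  }
}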
2024-12-02T15:24:38,004 DEBUG [master/be0458f36726:0.Chore.1 {}] balancer.BalancerClusterState(204): Hosts are {be0458f36726=0} racks are {/default-rack=0} 2024-12-02T15:24:38,005 DEBUG [master/be0458f36726:0.Chore.1 {}] balancer.BalancerClusterState(303): server 0 has 2 regions 2024-12-02T15:24:38,005 DEBUG [master/be0458f36726:0.Chore.1 {}] balancer.BalancerClusterState(303): server 1 has 2 regions 2024-12-02T15:24:38,005 DEBUG [master/be0458f36726:0.Chore.1 {}] balancer.BalancerClusterState(303): server 2 has 2 regions 2024-12-02T15:24:38,005 DEBUG [master/be0458f36726:0.Chore.1 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-02T15:24:38,005 DEBUG [master/be0458f36726:0.Chore.1 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-02T15:24:38,005 DEBUG [master/be0458f36726:0.Chore.1 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-02T15:24:38,005 INFO [master/be0458f36726:0.Chore.1 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-02T15:24:38,005 INFO [master/be0458f36726:0.Chore.1 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-02T15:24:38,005 INFO [master/be0458f36726:0.Chore.1 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-02T15:24:38,005 DEBUG [master/be0458f36726:0.Chore.1 {}] balancer.BalancerClusterState(326): Number of tables=4, number of hosts=1, number of racks=1 2024-12-02T15:24:38,012 INFO [master/be0458f36726:0.Chore.1 {}] balancer.StochasticLoadBalancer(395): Cluster wide - skipping load balancing because weighted average imbalance=0.014046237127914422 <= threshold(0.025). If you want more aggressive balancing, either lower hbase.master.balancer.stochastic.minCostNeedBalance from 0.025 or increase the relative multiplier(s) of the specific cost function(s). 
functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); ServerLocalityCostFunction : (multiplier=25.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.8474978590633961, need balance); CPRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.8577153282654146, need balance); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-12-02T15:24:38,013 DEBUG [master/be0458f36726:0.Chore.1 {}] master.HMaster(2203): Balancer is going into sleep until next period in 300000ms 2024-12-02T15:24:38,024 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(219): Skipping table testtb-testExportWithChecksum because normalization is disabled in its table properties and normalization is also disabled at table level by default 2024-12-02T15:24:38,025 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(219): Skipping table testExportExpiredSnapshot because normalization is disabled in its table properties and normalization is also disabled at table level by default 2024-12-02T15:24:41,450 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_126270/yarn-6841854118/MiniMRCluster_126270-localDir-nm-0_2/usercache/jenkins/appcache/application_1733152780087_0009/container_1733152780087_0009_01_000004/launch_container.sh] 2024-12-02T15:24:41,450 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_126270/yarn-6841854118/MiniMRCluster_126270-localDir-nm-0_2/usercache/jenkins/appcache/application_1733152780087_0009/container_1733152780087_0009_01_000004/container_tokens] 2024-12-02T15:24:41,450 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_126270/yarn-6841854118/MiniMRCluster_126270-localDir-nm-0_2/usercache/jenkins/appcache/application_1733152780087_0009/container_1733152780087_0009_01_000004/sysfs] 2024-12-02T15:24:41,572 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_126270/yarn-6841854118/MiniMRCluster_126270-localDir-nm-0_0/usercache/jenkins/appcache/application_1733152780087_0009/container_1733152780087_0009_01_000003/launch_container.sh] 2024-12-02T15:24:41,572 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_126270/yarn-6841854118/MiniMRCluster_126270-localDir-nm-0_0/usercache/jenkins/appcache/application_1733152780087_0009/container_1733152780087_0009_01_000003/container_tokens] 
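Editorial note: the StochasticLoadBalancer lines a little above report that balancing was skipped because the weighted average imbalance (about 0.014) stayed under the minCostNeedBalance threshold of 0.025, and they point at the knobs to change. A hedged sketch of lowering that threshold for a test mini cluster follows; the configuration key comes straight from the log message, while the class name and the use of HBaseTestingUtil (the test harness referenced elsewhere in this log) are assumptions about how one might apply it, not the test's actual setup.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseTestingUtil;

public class AggressiveBalancerTestCluster {
  public static HBaseTestingUtil start() throws Exception {
    HBaseTestingUtil util = new HBaseTestingUtil();
    Configuration conf = util.getConfiguration();
    // Let the StochasticLoadBalancer act on smaller imbalances than the 0.025 default
    // (the key named in the "skipping load balancing" message above).
    conf.setFloat("hbase.master.balancer.stochastic.minCostNeedBalance", 0.01f);
    util.startMiniCluster();
    return util;
  }
}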
2024-12-02T15:24:41,573 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_126270/yarn-6841854118/MiniMRCluster_126270-localDir-nm-0_0/usercache/jenkins/appcache/application_1733152780087_0009/container_1733152780087_0009_01_000003/sysfs] 2024-12-02T15:24:41,796 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties Error: java.io.IOException: Checksum mismatch between hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportWithChecksum/dfad3ba1f868d6526e2daed67068d47a/cf/45d9014e442348c3965eedbaaf355bd7 and file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/44e720fa-d04f-d823-6564-55fab94a4e60/local-export-1733153056774/archive/data/default/testtb-testExportWithChecksum/dfad3ba1f868d6526e2daed67068d47a/cf/45d9014e442348c3965eedbaaf355bd7. Input and output filesystems are of different types. Their checksum algorithms may be incompatible. You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. (NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:601) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:337) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:259) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:183) at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145) at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800) at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348) at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172) 2024-12-02T15:24:42,843 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733152780087_0009_000001 (auth:SIMPLE) from 127.0.0.1:54678 2024-12-02T15:24:43,569 INFO [regionserver/be0458f36726:0.Chore.1 {}] regionserver.HRegionServer$PeriodicMemStoreFlusher(1763): MemstoreFlusherChore requesting flush of hbase:acl,,1733152776657.82c95700d9ff2efea2507bd84223c4b0. 
because 82c95700d9ff2efea2507bd84223c4b0/l has an old edit so flush to free WALs after random delay 215938 ms 2024-12-02T15:24:44,430 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_126270/yarn-6841854118/MiniMRCluster_126270-localDir-nm-0_2/usercache/jenkins/appcache/application_1733152780087_0009/container_1733152780087_0009_01_000005/launch_container.sh] 2024-12-02T15:24:44,431 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_126270/yarn-6841854118/MiniMRCluster_126270-localDir-nm-0_2/usercache/jenkins/appcache/application_1733152780087_0009/container_1733152780087_0009_01_000005/container_tokens] 2024-12-02T15:24:44,431 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_126270/yarn-6841854118/MiniMRCluster_126270-localDir-nm-0_2/usercache/jenkins/appcache/application_1733152780087_0009/container_1733152780087_0009_01_000005/sysfs] Error: java.io.IOException: Checksum mismatch between hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportWithChecksum/6625c05dd9746ea58549adddbe4c0b35/cf/15bf3ae4f81e41408e106cc9568bb5a8 and file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/44e720fa-d04f-d823-6564-55fab94a4e60/local-export-1733153056774/archive/data/default/testtb-testExportWithChecksum/6625c05dd9746ea58549adddbe4c0b35/cf/15bf3ae4f81e41408e106cc9568bb5a8. Input and output filesystems are of different types. Their checksum algorithms may be incompatible. You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. (NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.) 
at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:601) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:337) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:259) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:183) at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145) at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800) at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348) at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172) 2024-12-02T15:24:45,846 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733152780087_0009_000001 (auth:SIMPLE) from 127.0.0.1:54382 2024-12-02T15:24:49,203 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_126270/yarn-6841854118/MiniMRCluster_126270-localDir-nm-0_1/usercache/jenkins/appcache/application_1733152780087_0009/container_1733152780087_0009_01_000006/launch_container.sh] 2024-12-02T15:24:49,203 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_126270/yarn-6841854118/MiniMRCluster_126270-localDir-nm-0_1/usercache/jenkins/appcache/application_1733152780087_0009/container_1733152780087_0009_01_000006/container_tokens] 2024-12-02T15:24:49,203 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_126270/yarn-6841854118/MiniMRCluster_126270-localDir-nm-0_1/usercache/jenkins/appcache/application_1733152780087_0009/container_1733152780087_0009_01_000006/sysfs] Error: java.io.IOException: Checksum mismatch between hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportWithChecksum/dfad3ba1f868d6526e2daed67068d47a/cf/45d9014e442348c3965eedbaaf355bd7 and file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/44e720fa-d04f-d823-6564-55fab94a4e60/local-export-1733153056774/archive/data/default/testtb-testExportWithChecksum/dfad3ba1f868d6526e2daed67068d47a/cf/45d9014e442348c3965eedbaaf355bd7. Input and output filesystems are of different types. Their checksum algorithms may be incompatible. You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. (NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.) 
at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:601) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:337) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:259) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:183) at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145) at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800) at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348) at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172) 2024-12-02T15:24:50,850 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733152780087_0009_000001 (auth:SIMPLE) from 127.0.0.1:54386 Error: java.io.IOException: Checksum mismatch between hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportWithChecksum/6625c05dd9746ea58549adddbe4c0b35/cf/15bf3ae4f81e41408e106cc9568bb5a8 and file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/44e720fa-d04f-d823-6564-55fab94a4e60/local-export-1733153056774/archive/data/default/testtb-testExportWithChecksum/6625c05dd9746ea58549adddbe4c0b35/cf/15bf3ae4f81e41408e106cc9568bb5a8. Input and output filesystems are of different types. Their checksum algorithms may be incompatible. You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. (NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.) 
at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:601) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:337) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:259) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:183) at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145) at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800) at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348) at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172) 2024-12-02T15:24:52,852 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733152780087_0009_000001 (auth:SIMPLE) from 127.0.0.1:54394 2024-12-02T15:24:55,033 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733152780087_0009_000001 (auth:SIMPLE) from 127.0.0.1:52064 2024-12-02T15:24:55,056 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742321_1497 (size=30190) 2024-12-02T15:24:55,056 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742321_1497 (size=30190) 2024-12-02T15:24:55,056 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742321_1497 (size=30190) 2024-12-02T15:24:55,081 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(360): Exit code from container container_1733152780087_0009_01_000009 is : 143 2024-12-02T15:24:55,098 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742322_1498 (size=460) 2024-12-02T15:24:55,098 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742322_1498 (size=460) 2024-12-02T15:24:55,099 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742322_1498 (size=460) 2024-12-02T15:24:55,110 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_126270/yarn-6841854118/MiniMRCluster_126270-localDir-nm-0_0/usercache/jenkins/appcache/application_1733152780087_0009/container_1733152780087_0009_01_000009/launch_container.sh] 2024-12-02T15:24:55,110 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_126270/yarn-6841854118/MiniMRCluster_126270-localDir-nm-0_0/usercache/jenkins/appcache/application_1733152780087_0009/container_1733152780087_0009_01_000009/container_tokens] 2024-12-02T15:24:55,110 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_126270/yarn-6841854118/MiniMRCluster_126270-localDir-nm-0_0/usercache/jenkins/appcache/application_1733152780087_0009/container_1733152780087_0009_01_000009/sysfs] 2024-12-02T15:24:55,120 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_126270/yarn-6841854118/MiniMRCluster_126270-localDir-nm-0_1/usercache/jenkins/appcache/application_1733152780087_0009/container_1733152780087_0009_01_000008/launch_container.sh] 2024-12-02T15:24:55,120 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_126270/yarn-6841854118/MiniMRCluster_126270-localDir-nm-0_1/usercache/jenkins/appcache/application_1733152780087_0009/container_1733152780087_0009_01_000008/container_tokens] 2024-12-02T15:24:55,120 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_126270/yarn-6841854118/MiniMRCluster_126270-localDir-nm-0_1/usercache/jenkins/appcache/application_1733152780087_0009/container_1733152780087_0009_01_000008/sysfs] 2024-12-02T15:24:55,132 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742323_1499 (size=30190) 2024-12-02T15:24:55,132 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742323_1499 (size=30190) 2024-12-02T15:24:55,132 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742323_1499 (size=30190) 2024-12-02T15:24:55,169 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742324_1500 (size=349827) 2024-12-02T15:24:55,171 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742324_1500 (size=349827) 2024-12-02T15:24:55,171 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742324_1500 (size=349827) 2024-12-02T15:24:55,183 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733152780087_0009_000001 (auth:SIMPLE) from 127.0.0.1:52074 2024-12-02T15:24:56,316 ERROR [Time-limited test {}] snapshot.ExportSnapshot(1239): Snapshot export failed org.apache.hadoop.hbase.snapshot.ExportSnapshotException: Task failed task_1733152780087_0009_m_000000 Job failed as tasks failed. failedMaps:1 failedReduces:0 killedMaps:0 killedReduces: 0 at org.apache.hadoop.hbase.snapshot.ExportSnapshot.runCopyJob(ExportSnapshot.java:947) ~[classes/:?] at org.apache.hadoop.hbase.snapshot.ExportSnapshot.doWork(ExportSnapshot.java:1216) ~[classes/:?] at org.apache.hadoop.hbase.util.AbstractHBaseTool.run(AbstractHBaseTool.java:150) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.util.ToolRunner.run(ToolRunner.java:82) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.runExportSnapshot(TestExportSnapshot.java:570) ~[test-classes/:?]
at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.testExportFileSystemState(TestExportSnapshot.java:400) ~[test-classes/:?]
at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.testExportWithChecksum(TestExportSnapshot.java:285) ~[test-classes/:?]
at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?]
at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?]
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2]
at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2]
at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2]
at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) ~[junit-4.13.2.jar:4.13.2]
at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2]
at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2]
at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) ~[junit-4.13.2.jar:4.13.2]
at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2]
at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2]
at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2]
at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2]
at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2]
at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2]
at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2]
at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2]
at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2]
at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2]
at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2]
at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2]
at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2]
at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2]
at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?]
at java.lang.Thread.run(Thread.java:840) ~[?:?]
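Editorial note: the export job ultimately fails with ExportSnapshotException because every map attempt hit the checksum mismatch shown earlier. Conceptually, the per-file verification compares file-level checksums obtained from the source and destination filesystems; the sketch below illustrates that idea with the standard Hadoop FileSystem API and is not the actual ExportSnapshot.verifyCopyResult implementation.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileChecksum;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public final class FileChecksumCompare {
  // Compares file-level checksums of a copied file on two (possibly different) filesystems.
  static void verify(Configuration conf, Path input, Path output) throws IOException {
    FileChecksum in = input.getFileSystem(conf).getFileChecksum(input);
    FileChecksum out = output.getFileSystem(conf).getFileChecksum(output);
    // A filesystem may return null here (no checksum support), and HDFS's default
    // MD5-of-CRC checksum also depends on block size, so copies between different
    // filesystem types or block sizes can legitimately fail this comparison.
    if (in == null || out == null || !in.equals(out)) {
      throw new IOException("Checksum mismatch between " + input + " and " + output);
    }
  }
}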
2024-12-02T15:24:56,317 INFO [Time-limited test {}] snapshot.TestExportSnapshot(515): HDFS export destination path: hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/export-test/export-1733153096317 2024-12-02T15:24:56,318 INFO [Time-limited test {}] snapshot.TestExportSnapshot(542): tgtFsUri=hdfs://localhost:36329, tgtDir=hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/export-test/export-1733153096317, rawTgtDir=hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/export-test/export-1733153096317, srcFsUri=hdfs://localhost:36329, srcDir=hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657 2024-12-02T15:24:56,341 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1094): inputFs=hdfs://localhost:36329, inputRoot=hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657 2024-12-02T15:24:56,341 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1095): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-205930424_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/export-test/export-1733153096317, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/export-test/export-1733153096317/.hbase-snapshot/.tmp/snaptb0-testExportWithChecksum 2024-12-02T15:24:56,343 INFO [Time-limited test {}] snapshot.ExportSnapshot(1104): Verify the source snapshot's expiration status and integrity. 2024-12-02T15:24:56,347 INFO [Time-limited test {}] snapshot.ExportSnapshot(1162): Copy Snapshot Manifest from hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/.hbase-snapshot/snaptb0-testExportWithChecksum to hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/export-test/export-1733153096317/.hbase-snapshot/.tmp/snaptb0-testExportWithChecksum 2024-12-02T15:24:56,355 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742325_1501 (size=621) 2024-12-02T15:24:56,355 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742326_1502 (size=156) 2024-12-02T15:24:56,355 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742325_1501 (size=621) 2024-12-02T15:24:56,355 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742325_1501 (size=621) 2024-12-02T15:24:56,356 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742326_1502 (size=156) 2024-12-02T15:24:56,358 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-common/target/hbase-common-3.0.0-beta-2-SNAPSHOT.jar 2024-12-02T15:24:56,358 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-protocol-shaded/target/hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar 2024-12-02T15:24:56,359 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): 
For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-client/target/hbase-client-3.0.0-beta-2-SNAPSHOT.jar 2024-12-02T15:24:56,361 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742326_1502 (size=156) 2024-12-02T15:24:57,026 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_126270/yarn-6841854118/MiniMRCluster_126270-localDir-nm-0_3/usercache/jenkins/appcache/application_1733152780087_0009/container_1733152780087_0009_01_000007/launch_container.sh] 2024-12-02T15:24:57,027 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_126270/yarn-6841854118/MiniMRCluster_126270-localDir-nm-0_3/usercache/jenkins/appcache/application_1733152780087_0009/container_1733152780087_0009_01_000007/container_tokens] 2024-12-02T15:24:57,027 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_126270/yarn-6841854118/MiniMRCluster_126270-localDir-nm-0_3/usercache/jenkins/appcache/application_1733152780087_0009/container_1733152780087_0009_01_000007/sysfs] 2024-12-02T15:24:57,123 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/44e720fa-d04f-d823-6564-55fab94a4e60/hadoop-7679927629408429323.jar 2024-12-02T15:24:57,124 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-hadoop-compat/target/hbase-hadoop-compat-3.0.0-beta-2-SNAPSHOT.jar 2024-12-02T15:24:57,124 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-hadoop-compat/target/hbase-hadoop-compat-3.0.0-beta-2-SNAPSHOT.jar 2024-12-02T15:24:57,175 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/44e720fa-d04f-d823-6564-55fab94a4e60/hadoop-13192622713591321665.jar 2024-12-02T15:24:57,175 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-metrics/target/hbase-metrics-3.0.0-beta-2-SNAPSHOT.jar 2024-12-02T15:24:57,176 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-metrics-api/target/hbase-metrics-api-3.0.0-beta-2-SNAPSHOT.jar 2024-12-02T15:24:57,176 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-replication/target/hbase-replication-3.0.0-beta-2-SNAPSHOT.jar 2024-12-02T15:24:57,176 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-http/target/hbase-http-3.0.0-beta-2-SNAPSHOT.jar 2024-12-02T15:24:57,176 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-procedure/target/hbase-procedure-3.0.0-beta-2-SNAPSHOT.jar 2024-12-02T15:24:57,176 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-zookeeper/target/hbase-zookeeper-3.0.0-beta-2-SNAPSHOT.jar 2024-12-02T15:24:57,177 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-02T15:24:57,177 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-02T15:24:57,177 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-02T15:24:57,177 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-02T15:24:57,177 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-02T15:24:57,177 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-02T15:24:57,178 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-02T15:24:57,178 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-02T15:24:57,178 DEBUG [Time-limited test {}] 
mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-02T15:24:57,178 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-02T15:24:57,178 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-02T15:24:57,179 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-02T15:24:57,179 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-02T15:24:57,179 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-02T15:24:57,179 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-02T15:24:57,179 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-02T15:24:57,179 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-02T15:24:57,180 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-02T15:24:57,222 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742327_1503 (size=24020) 2024-12-02T15:24:57,222 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742327_1503 (size=24020) 2024-12-02T15:24:57,222 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742327_1503 
(size=24020) 2024-12-02T15:24:57,228 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742328_1504 (size=77755) 2024-12-02T15:24:57,228 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742328_1504 (size=77755) 2024-12-02T15:24:57,228 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742328_1504 (size=77755) 2024-12-02T15:24:57,236 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742329_1505 (size=443171) 2024-12-02T15:24:57,236 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742329_1505 (size=443171) 2024-12-02T15:24:57,236 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742329_1505 (size=443171) 2024-12-02T15:24:57,248 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742330_1506 (size=131360) 2024-12-02T15:24:57,248 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742330_1506 (size=131360) 2024-12-02T15:24:57,248 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742330_1506 (size=131360) 2024-12-02T15:24:57,254 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742331_1507 (size=111793) 2024-12-02T15:24:57,254 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742331_1507 (size=111793) 2024-12-02T15:24:57,254 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742331_1507 (size=111793) 2024-12-02T15:24:57,264 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742332_1508 (size=1832290) 2024-12-02T15:24:57,264 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742332_1508 (size=1832290) 2024-12-02T15:24:57,264 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742332_1508 (size=1832290) 2024-12-02T15:24:57,284 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742333_1509 (size=8360005) 2024-12-02T15:24:57,284 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742333_1509 (size=8360005) 2024-12-02T15:24:57,285 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742333_1509 (size=8360005) 2024-12-02T15:24:57,291 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742334_1510 (size=503880) 2024-12-02T15:24:57,291 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to 
blk_1073742334_1510 (size=503880) 2024-12-02T15:24:57,291 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742334_1510 (size=503880) 2024-12-02T15:24:57,297 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742335_1511 (size=322274) 2024-12-02T15:24:57,297 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742335_1511 (size=322274) 2024-12-02T15:24:57,298 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742335_1511 (size=322274) 2024-12-02T15:24:57,306 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742336_1512 (size=20406) 2024-12-02T15:24:57,306 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742336_1512 (size=20406) 2024-12-02T15:24:57,307 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742336_1512 (size=20406) 2024-12-02T15:24:57,315 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742337_1513 (size=45609) 2024-12-02T15:24:57,315 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742337_1513 (size=45609) 2024-12-02T15:24:57,316 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742337_1513 (size=45609) 2024-12-02T15:24:57,321 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742338_1514 (size=136454) 2024-12-02T15:24:57,321 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742338_1514 (size=136454) 2024-12-02T15:24:57,321 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742338_1514 (size=136454) 2024-12-02T15:24:57,331 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742339_1515 (size=1597136) 2024-12-02T15:24:57,332 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742339_1515 (size=1597136) 2024-12-02T15:24:57,332 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742339_1515 (size=1597136) 2024-12-02T15:24:57,338 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742340_1516 (size=30873) 2024-12-02T15:24:57,338 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742340_1516 (size=30873) 2024-12-02T15:24:57,339 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742340_1516 (size=30873) 2024-12-02T15:24:57,345 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to 
blk_1073742341_1517 (size=29229) 2024-12-02T15:24:57,346 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742341_1517 (size=29229) 2024-12-02T15:24:57,346 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742341_1517 (size=29229) 2024-12-02T15:24:57,364 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742342_1518 (size=6424745) 2024-12-02T15:24:57,364 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742342_1518 (size=6424745) 2024-12-02T15:24:57,364 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742342_1518 (size=6424745) 2024-12-02T15:24:57,370 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742343_1519 (size=903849) 2024-12-02T15:24:57,371 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742343_1519 (size=903849) 2024-12-02T15:24:57,371 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742343_1519 (size=903849) 2024-12-02T15:24:57,389 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742344_1520 (size=5175431) 2024-12-02T15:24:57,390 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742344_1520 (size=5175431) 2024-12-02T15:24:57,390 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742344_1520 (size=5175431) 2024-12-02T15:24:57,395 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742345_1521 (size=232881) 2024-12-02T15:24:57,395 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742345_1521 (size=232881) 2024-12-02T15:24:57,395 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742345_1521 (size=232881) 2024-12-02T15:24:57,403 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742346_1522 (size=1323991) 2024-12-02T15:24:57,403 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742346_1522 (size=1323991) 2024-12-02T15:24:57,404 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742346_1522 (size=1323991) 2024-12-02T15:24:57,418 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742347_1523 (size=4695811) 2024-12-02T15:24:57,418 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742347_1523 (size=4695811) 2024-12-02T15:24:57,418 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:32893 is added to blk_1073742347_1523 (size=4695811) 2024-12-02T15:24:57,427 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742348_1524 (size=1877034) 2024-12-02T15:24:57,427 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742348_1524 (size=1877034) 2024-12-02T15:24:57,427 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742348_1524 (size=1877034) 2024-12-02T15:24:57,433 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742349_1525 (size=217555) 2024-12-02T15:24:57,433 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742349_1525 (size=217555) 2024-12-02T15:24:57,434 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742349_1525 (size=217555) 2024-12-02T15:24:57,448 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742350_1526 (size=4188619) 2024-12-02T15:24:57,448 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742350_1526 (size=4188619) 2024-12-02T15:24:57,448 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742350_1526 (size=4188619) 2024-12-02T15:24:57,454 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742351_1527 (size=127628) 2024-12-02T15:24:57,454 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742351_1527 (size=127628) 2024-12-02T15:24:57,454 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742351_1527 (size=127628) 2024-12-02T15:24:57,455 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 
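The long run of "For class ..., using jar ..." DEBUG lines above is TableMapReduceUtil resolving the jar that contains each dependency class and shipping it with the MapReduce job; the addStoredBlock INFO lines are HDFS acknowledging replicas of those uploaded jars on the three test datanodes. A rough sketch of the call that produces this, assuming a hypothetical job name and not the exact configuration of the export job in this run:

// Sketch: attaching HBase dependency jars to a MapReduce job, which emits the
// "For class ..., using jar ..." lines seen above. Job name is illustrative.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.mapreduce.Job;

public class DependencyJarSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    Job job = Job.getInstance(conf, "snapshot-export-sketch");
    // Locates the jar containing each class the tasks need (HBase client, shaded
    // protobuf, ZooKeeper, metrics, ...) and adds it to the job's classpath via
    // the distributed cache, so the MiniMRCluster containers can load them.
    TableMapReduceUtil.addDependencyJars(job);
  }
}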
2024-12-02T15:24:57,457 INFO [Time-limited test {}] snapshot.ExportSnapshot(663): Loading Snapshot 'snaptb0-testExportWithChecksum' hfile list 2024-12-02T15:24:57,458 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=0 size=8.1 K 2024-12-02T15:24:57,458 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=1 size=5.2 K 2024-12-02T15:24:57,463 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742352_1528 (size=441) 2024-12-02T15:24:57,463 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742352_1528 (size=441) 2024-12-02T15:24:57,464 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742352_1528 (size=441) 2024-12-02T15:24:57,468 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742353_1529 (size=21) 2024-12-02T15:24:57,468 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742353_1529 (size=21) 2024-12-02T15:24:57,469 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742353_1529 (size=21) 2024-12-02T15:24:57,482 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742354_1530 (size=304079) 2024-12-02T15:24:57,482 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742354_1530 (size=304079) 2024-12-02T15:24:57,482 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742354_1530 (size=304079) 2024-12-02T15:25:00,774 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 6625c05dd9746ea58549adddbe4c0b35, had cached 0 bytes from a total of 5288 2024-12-02T15:25:00,774 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region dfad3ba1f868d6526e2daed67068d47a, had cached 0 bytes from a total of 8324 2024-12-02T15:25:01,245 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-02T15:25:01,245 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-12-02T15:25:01,249 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733152780087_0009_000001 (auth:SIMPLE) from 127.0.0.1:34380 2024-12-02T15:25:01,259 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_126270/yarn-6841854118/MiniMRCluster_126270-localDir-nm-1_3/usercache/jenkins/appcache/application_1733152780087_0009/container_1733152780087_0009_01_000001/launch_container.sh] 2024-12-02T15:25:01,259 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_126270/yarn-6841854118/MiniMRCluster_126270-localDir-nm-1_3/usercache/jenkins/appcache/application_1733152780087_0009/container_1733152780087_0009_01_000001/container_tokens] 2024-12-02T15:25:01,259 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_126270/yarn-6841854118/MiniMRCluster_126270-localDir-nm-1_3/usercache/jenkins/appcache/application_1733152780087_0009/container_1733152780087_0009_01_000001/sysfs] 2024-12-02T15:25:01,778 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-02T15:25:02,114 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733152780087_0010_000001 (auth:SIMPLE) from 127.0.0.1:52078 2024-12-02T15:25:06,908 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733152780087_0010_000001 (auth:SIMPLE) from 127.0.0.1:49496 2024-12-02T15:25:07,370 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742355_1531 (size=349777) 2024-12-02T15:25:07,370 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742355_1531 (size=349777) 2024-12-02T15:25:07,370 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742355_1531 (size=349777) 2024-12-02T15:25:09,404 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733152780087_0010_000001 (auth:SIMPLE) from 127.0.0.1:51484 2024-12-02T15:25:09,408 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733152780087_0010_000001 (auth:SIMPLE) from 127.0.0.1:34988 2024-12-02T15:25:14,533 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742356_1532 (size=8324) 2024-12-02T15:25:14,534 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742356_1532 (size=8324) 2024-12-02T15:25:14,534 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742356_1532 (size=8324) 
2024-12-02T15:25:14,710 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_126270/yarn-6841854118/MiniMRCluster_126270-localDir-nm-0_2/usercache/jenkins/appcache/application_1733152780087_0010/container_1733152780087_0010_01_000002/launch_container.sh] 2024-12-02T15:25:14,710 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_126270/yarn-6841854118/MiniMRCluster_126270-localDir-nm-0_2/usercache/jenkins/appcache/application_1733152780087_0010/container_1733152780087_0010_01_000002/container_tokens] 2024-12-02T15:25:14,710 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_126270/yarn-6841854118/MiniMRCluster_126270-localDir-nm-0_2/usercache/jenkins/appcache/application_1733152780087_0010/container_1733152780087_0010_01_000002/sysfs] 2024-12-02T15:25:15,556 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742358_1534 (size=5288) 2024-12-02T15:25:15,556 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742358_1534 (size=5288) 2024-12-02T15:25:15,556 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742358_1534 (size=5288) 2024-12-02T15:25:15,616 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742357_1533 (size=22141) 2024-12-02T15:25:15,616 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742357_1533 (size=22141) 2024-12-02T15:25:15,616 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742357_1533 (size=22141) 2024-12-02T15:25:15,631 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742359_1535 (size=463) 2024-12-02T15:25:15,631 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742359_1535 (size=463) 2024-12-02T15:25:15,631 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742359_1535 (size=463) 2024-12-02T15:25:15,663 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742360_1536 (size=22141) 2024-12-02T15:25:15,663 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742360_1536 (size=22141) 2024-12-02T15:25:15,663 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742360_1536 (size=22141) 2024-12-02T15:25:15,678 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742361_1537 (size=349777) 
2024-12-02T15:25:15,678 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742361_1537 (size=349777) 2024-12-02T15:25:15,678 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742361_1537 (size=349777) 2024-12-02T15:25:15,693 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733152780087_0010_000001 (auth:SIMPLE) from 127.0.0.1:42794 2024-12-02T15:25:15,694 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_126270/yarn-6841854118/MiniMRCluster_126270-localDir-nm-1_3/usercache/jenkins/appcache/application_1733152780087_0010/container_1733152780087_0010_01_000003/launch_container.sh] 2024-12-02T15:25:15,694 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_126270/yarn-6841854118/MiniMRCluster_126270-localDir-nm-1_3/usercache/jenkins/appcache/application_1733152780087_0010/container_1733152780087_0010_01_000003/container_tokens] 2024-12-02T15:25:15,694 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_126270/yarn-6841854118/MiniMRCluster_126270-localDir-nm-1_3/usercache/jenkins/appcache/application_1733152780087_0010/container_1733152780087_0010_01_000003/sysfs] 2024-12-02T15:25:17,706 INFO [Time-limited test {}] snapshot.ExportSnapshot(1219): Finalize the Snapshot Export 2024-12-02T15:25:17,711 INFO [Time-limited test {}] snapshot.ExportSnapshot(1230): Verify the exported snapshot's expiration status and integrity. 
2024-12-02T15:25:17,717 INFO [Time-limited test {}] snapshot.ExportSnapshot(1236): Export Completed: snaptb0-testExportWithChecksum 2024-12-02T15:25:17,717 INFO [Time-limited test {}] snapshot.TestExportSnapshot(409): Exported snapshot 2024-12-02T15:25:17,717 INFO [Time-limited test {}] snapshot.TestExportSnapshot(420): Verified filesystem state 2024-12-02T15:25:17,717 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-205930424_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/.hbase-snapshot/snaptb0-testExportWithChecksum at hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/.hbase-snapshot/snaptb0-testExportWithChecksum 2024-12-02T15:25:17,718 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/.hbase-snapshot/snaptb0-testExportWithChecksum/.snapshotinfo 2024-12-02T15:25:17,718 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/.hbase-snapshot/snaptb0-testExportWithChecksum/data.manifest 2024-12-02T15:25:17,718 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-205930424_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/export-test/export-1733153096317/.hbase-snapshot/snaptb0-testExportWithChecksum at hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/export-test/export-1733153096317/.hbase-snapshot/snaptb0-testExportWithChecksum 2024-12-02T15:25:17,718 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/export-test/export-1733153096317/.hbase-snapshot/snaptb0-testExportWithChecksum/.snapshotinfo 2024-12-02T15:25:17,718 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/export-test/export-1733153096317/.hbase-snapshot/snaptb0-testExportWithChecksum/data.manifest 2024-12-02T15:25:17,725 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testExportWithChecksum 2024-12-02T15:25:17,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] procedure2.ProcedureExecutor(1139): Stored pid=224, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportWithChecksum 2024-12-02T15:25:17,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=224 2024-12-02T15:25:17,729 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithChecksum","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733153117729"}]},"ts":"1733153117729"} 2024-12-02T15:25:17,733 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithChecksum, state=DISABLING in hbase:meta 2024-12-02T15:25:17,733 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(284): Set testtb-testExportWithChecksum to state=DISABLING 2024-12-02T15:25:17,734 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): 
Initialized subprocedures=[{pid=225, ppid=224, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportWithChecksum}] 2024-12-02T15:25:17,736 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=226, ppid=225, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=6625c05dd9746ea58549adddbe4c0b35, UNASSIGN}, {pid=227, ppid=225, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=dfad3ba1f868d6526e2daed67068d47a, UNASSIGN}] 2024-12-02T15:25:17,737 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=227, ppid=225, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=dfad3ba1f868d6526e2daed67068d47a, UNASSIGN 2024-12-02T15:25:17,738 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=226, ppid=225, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=6625c05dd9746ea58549adddbe4c0b35, UNASSIGN 2024-12-02T15:25:17,739 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=227 updating hbase:meta row=dfad3ba1f868d6526e2daed67068d47a, regionState=CLOSING, regionLocation=be0458f36726,43025,1733152773677 2024-12-02T15:25:17,739 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=226 updating hbase:meta row=6625c05dd9746ea58549adddbe4c0b35, regionState=CLOSING, regionLocation=be0458f36726,35155,1733152773848 2024-12-02T15:25:17,740 WARN [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34441 {}] assignment.AssignmentManager(1543): Unable to acquire lock for regionNode state=CLOSING, location=be0458f36726,35155,1733152773848, table=testtb-testExportWithChecksum, region=6625c05dd9746ea58549adddbe4c0b35. It is likely that another thread is currently holding the lock. To avoid deadlock, skip execution for now. 
2024-12-02T15:25:17,741 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=226, ppid=225, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=6625c05dd9746ea58549adddbe4c0b35, UNASSIGN because future has completed 2024-12-02T15:25:17,743 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-02T15:25:17,743 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=228, ppid=226, state=RUNNABLE, hasLock=false; CloseRegionProcedure 6625c05dd9746ea58549adddbe4c0b35, server=be0458f36726,35155,1733152773848}] 2024-12-02T15:25:17,743 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=227, ppid=225, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=dfad3ba1f868d6526e2daed67068d47a, UNASSIGN because future has completed 2024-12-02T15:25:17,744 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-02T15:25:17,744 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=229, ppid=227, state=RUNNABLE, hasLock=false; CloseRegionProcedure dfad3ba1f868d6526e2daed67068d47a, server=be0458f36726,43025,1733152773677}] 2024-12-02T15:25:17,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=224 2024-12-02T15:25:17,897 INFO [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=228}] handler.UnassignRegionHandler(122): Close 6625c05dd9746ea58549adddbe4c0b35 2024-12-02T15:25:17,897 INFO [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=229}] handler.UnassignRegionHandler(122): Close dfad3ba1f868d6526e2daed67068d47a 2024-12-02T15:25:17,898 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=228}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-02T15:25:17,898 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=229}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-02T15:25:17,898 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=228}] regionserver.HRegion(1722): Closing 6625c05dd9746ea58549adddbe4c0b35, disabling compactions & flushes 2024-12-02T15:25:17,898 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=229}] regionserver.HRegion(1722): Closing dfad3ba1f868d6526e2daed67068d47a, disabling compactions & flushes 2024-12-02T15:25:17,898 INFO [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=229}] regionserver.HRegion(1755): Closing region testtb-testExportWithChecksum,1,1733153055421.dfad3ba1f868d6526e2daed67068d47a. 2024-12-02T15:25:17,898 INFO [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=228}] regionserver.HRegion(1755): Closing region testtb-testExportWithChecksum,,1733153055421.6625c05dd9746ea58549adddbe4c0b35. 
2024-12-02T15:25:17,898 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=229}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithChecksum,1,1733153055421.dfad3ba1f868d6526e2daed67068d47a. 2024-12-02T15:25:17,898 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=228}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithChecksum,,1733153055421.6625c05dd9746ea58549adddbe4c0b35. 2024-12-02T15:25:17,898 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=229}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithChecksum,1,1733153055421.dfad3ba1f868d6526e2daed67068d47a. after waiting 0 ms 2024-12-02T15:25:17,898 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=229}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithChecksum,1,1733153055421.dfad3ba1f868d6526e2daed67068d47a. 2024-12-02T15:25:17,898 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=228}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithChecksum,,1733153055421.6625c05dd9746ea58549adddbe4c0b35. after waiting 0 ms 2024-12-02T15:25:17,898 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=228}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithChecksum,,1733153055421.6625c05dd9746ea58549adddbe4c0b35. 2024-12-02T15:25:17,906 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=228}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportWithChecksum/6625c05dd9746ea58549adddbe4c0b35/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-02T15:25:17,906 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=229}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportWithChecksum/dfad3ba1f868d6526e2daed67068d47a/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-02T15:25:17,907 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=228}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-02T15:25:17,907 INFO [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=228}] regionserver.HRegion(1973): Closed testtb-testExportWithChecksum,,1733153055421.6625c05dd9746ea58549adddbe4c0b35. 
2024-12-02T15:25:17,907 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=228}] regionserver.HRegion(1676): Region close journal for 6625c05dd9746ea58549adddbe4c0b35: Waiting for close lock at 1733153117898Running coprocessor pre-close hooks at 1733153117898Disabling compacts and flushes for region at 1733153117898Disabling writes for close at 1733153117898Writing region close event to WAL at 1733153117901 (+3 ms)Running coprocessor post-close hooks at 1733153117907 (+6 ms)Closed at 1733153117907 2024-12-02T15:25:17,907 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=229}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-02T15:25:17,907 INFO [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=229}] regionserver.HRegion(1973): Closed testtb-testExportWithChecksum,1,1733153055421.dfad3ba1f868d6526e2daed67068d47a. 2024-12-02T15:25:17,907 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=229}] regionserver.HRegion(1676): Region close journal for dfad3ba1f868d6526e2daed67068d47a: Waiting for close lock at 1733153117898Running coprocessor pre-close hooks at 1733153117898Disabling compacts and flushes for region at 1733153117898Disabling writes for close at 1733153117898Writing region close event to WAL at 1733153117900 (+2 ms)Running coprocessor post-close hooks at 1733153117907 (+7 ms)Closed at 1733153117907 2024-12-02T15:25:17,908 INFO [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=229}] handler.UnassignRegionHandler(157): Closed dfad3ba1f868d6526e2daed67068d47a 2024-12-02T15:25:17,909 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=227 updating hbase:meta row=dfad3ba1f868d6526e2daed67068d47a, regionState=CLOSED 2024-12-02T15:25:17,909 INFO [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=228}] handler.UnassignRegionHandler(157): Closed 6625c05dd9746ea58549adddbe4c0b35 2024-12-02T15:25:17,910 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=226 updating hbase:meta row=6625c05dd9746ea58549adddbe4c0b35, regionState=CLOSED 2024-12-02T15:25:17,910 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=229, ppid=227, state=RUNNABLE, hasLock=false; CloseRegionProcedure dfad3ba1f868d6526e2daed67068d47a, server=be0458f36726,43025,1733152773677 because future has completed 2024-12-02T15:25:17,911 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=228, ppid=226, state=RUNNABLE, hasLock=false; CloseRegionProcedure 6625c05dd9746ea58549adddbe4c0b35, server=be0458f36726,35155,1733152773848 because future has completed 2024-12-02T15:25:17,913 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=229, resume processing ppid=227 2024-12-02T15:25:17,913 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=228, resume processing ppid=226 2024-12-02T15:25:17,914 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=228, ppid=226, state=SUCCESS, hasLock=false; CloseRegionProcedure 6625c05dd9746ea58549adddbe4c0b35, server=be0458f36726,35155,1733152773848 in 169 msec 2024-12-02T15:25:17,914 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=229, ppid=227, state=SUCCESS, hasLock=false; 
CloseRegionProcedure dfad3ba1f868d6526e2daed67068d47a, server=be0458f36726,43025,1733152773677 in 167 msec 2024-12-02T15:25:17,914 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=227, ppid=225, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=dfad3ba1f868d6526e2daed67068d47a, UNASSIGN in 177 msec 2024-12-02T15:25:17,915 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=226, resume processing ppid=225 2024-12-02T15:25:17,915 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=226, ppid=225, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=6625c05dd9746ea58549adddbe4c0b35, UNASSIGN in 178 msec 2024-12-02T15:25:17,917 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=225, resume processing ppid=224 2024-12-02T15:25:17,917 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=225, ppid=224, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportWithChecksum in 181 msec 2024-12-02T15:25:17,918 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithChecksum","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733153117918"}]},"ts":"1733153117918"} 2024-12-02T15:25:17,919 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithChecksum, state=DISABLED in hbase:meta 2024-12-02T15:25:17,919 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(296): Set testtb-testExportWithChecksum to state=DISABLED 2024-12-02T15:25:17,921 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=224, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportWithChecksum in 195 msec 2024-12-02T15:25:18,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=224 2024-12-02T15:25:18,045 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testExportWithChecksum completed 2024-12-02T15:25:18,046 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testExportWithChecksum 2024-12-02T15:25:18,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] procedure2.ProcedureExecutor(1139): Stored pid=230, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportWithChecksum 2024-12-02T15:25:18,051 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=230, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportWithChecksum 2024-12-02T15:25:18,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportWithChecksum 2024-12-02T15:25:18,052 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=230, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportWithChecksum 2024-12-02T15:25:18,054 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43025 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportWithChecksum 
2024-12-02T15:25:18,056 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportWithChecksum/6625c05dd9746ea58549adddbe4c0b35 2024-12-02T15:25:18,056 DEBUG [HFileArchiver-25 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportWithChecksum/dfad3ba1f868d6526e2daed67068d47a 2024-12-02T15:25:18,057 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportWithChecksum/6625c05dd9746ea58549adddbe4c0b35/cf, FileablePath, hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportWithChecksum/6625c05dd9746ea58549adddbe4c0b35/recovered.edits] 2024-12-02T15:25:18,057 DEBUG [HFileArchiver-25 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportWithChecksum/dfad3ba1f868d6526e2daed67068d47a/cf, FileablePath, hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportWithChecksum/dfad3ba1f868d6526e2daed67068d47a/recovered.edits] 2024-12-02T15:25:18,060 DEBUG [HFileArchiver-25 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportWithChecksum/dfad3ba1f868d6526e2daed67068d47a/cf/45d9014e442348c3965eedbaaf355bd7 to hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/archive/data/default/testtb-testExportWithChecksum/dfad3ba1f868d6526e2daed67068d47a/cf/45d9014e442348c3965eedbaaf355bd7 2024-12-02T15:25:18,060 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportWithChecksum/6625c05dd9746ea58549adddbe4c0b35/cf/15bf3ae4f81e41408e106cc9568bb5a8 to hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/archive/data/default/testtb-testExportWithChecksum/6625c05dd9746ea58549adddbe4c0b35/cf/15bf3ae4f81e41408e106cc9568bb5a8 2024-12-02T15:25:18,062 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportWithChecksum/6625c05dd9746ea58549adddbe4c0b35/recovered.edits/9.seqid to hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/archive/data/default/testtb-testExportWithChecksum/6625c05dd9746ea58549adddbe4c0b35/recovered.edits/9.seqid 2024-12-02T15:25:18,072 DEBUG [HFileArchiver-25 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportWithChecksum/dfad3ba1f868d6526e2daed67068d47a/recovered.edits/9.seqid to hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/archive/data/default/testtb-testExportWithChecksum/dfad3ba1f868d6526e2daed67068d47a/recovered.edits/9.seqid 2024-12-02T15:25:18,073 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(610): Deleted 
hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportWithChecksum/6625c05dd9746ea58549adddbe4c0b35 2024-12-02T15:25:18,073 DEBUG [HFileArchiver-25 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportWithChecksum/dfad3ba1f868d6526e2daed67068d47a 2024-12-02T15:25:18,073 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportWithChecksum regions 2024-12-02T15:25:18,075 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=230, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportWithChecksum 2024-12-02T15:25:18,077 WARN [PEWorker-5 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testExportWithChecksum from hbase:meta 2024-12-02T15:25:18,079 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testExportWithChecksum' descriptor. 2024-12-02T15:25:18,081 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=230, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportWithChecksum 2024-12-02T15:25:18,081 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testExportWithChecksum' from region states. 2024-12-02T15:25:18,081 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportWithChecksum,,1733153055421.6625c05dd9746ea58549adddbe4c0b35.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733153118081"}]},"ts":"9223372036854775807"} 2024-12-02T15:25:18,081 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportWithChecksum,1,1733153055421.dfad3ba1f868d6526e2daed67068d47a.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733153118081"}]},"ts":"9223372036854775807"} 2024-12-02T15:25:18,083 INFO [PEWorker-5 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-12-02T15:25:18,083 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => 6625c05dd9746ea58549adddbe4c0b35, NAME => 'testtb-testExportWithChecksum,,1733153055421.6625c05dd9746ea58549adddbe4c0b35.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => dfad3ba1f868d6526e2daed67068d47a, NAME => 'testtb-testExportWithChecksum,1,1733153055421.dfad3ba1f868d6526e2daed67068d47a.', STARTKEY => '1', ENDKEY => ''}] 2024-12-02T15:25:18,083 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testExportWithChecksum' as deleted. 
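
Note that the DeleteTableProcedure does not destroy the store files outright: the HFileArchiver entries above show everything under data/default/<table>/<region> being relocated into the parallel archive/ tree, which is what keeps previously taken snapshots of this table restorable after the drop. A minimal sketch of the path rewrite implied by the "Archived from ... to ..." lines; the helper name is hypothetical and only mirrors the layout visible in this log:

    import org.apache.hadoop.fs.Path;

    public class ArchivePathSketch {
      // Rewrites <root>/data/<ns>/<table>/<region>/<cf>/<file>
      // to       <root>/archive/data/<ns>/<table>/<region>/<cf>/<file>
      static Path toArchivePath(Path rootDir, Path storeFile) {
        String prefix = rootDir.toString() + "/";
        String relative = storeFile.toString().substring(prefix.length());
        return new Path(new Path(rootDir, "archive"), relative);
      }

      public static void main(String[] args) {
        Path root = new Path("hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657");
        Path hfile = new Path(root,
            "data/default/testtb-testExportWithChecksum/dfad3ba1f868d6526e2daed67068d47a/cf/45d9014e442348c3965eedbaaf355bd7");
        System.out.println(toArchivePath(root, hfile));  // prints the archive/ location seen above
      }
    }
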
2024-12-02T15:25:18,083 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportWithChecksum","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733153118083"}]},"ts":"9223372036854775807"} 2024-12-02T15:25:18,084 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportWithChecksum state from META 2024-12-02T15:25:18,085 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(133): Finished pid=230, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportWithChecksum 2024-12-02T15:25:18,086 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=230, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportWithChecksum in 38 msec 2024-12-02T15:25:18,110 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35155-0x10197ce54a90003, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-12-02T15:25:18,110 DEBUG [pool-69-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45405-0x10197ce54a90002, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-12-02T15:25:18,110 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34441-0x10197ce54a90000, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-12-02T15:25:18,110 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43025-0x10197ce54a90001, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-12-02T15:25:18,111 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF 2024-12-02T15:25:18,111 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF 2024-12-02T15:25:18,111 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF 2024-12-02T15:25:18,111 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF 2024-12-02T15:25:18,118 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35155-0x10197ce54a90003, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-12-02T15:25:18,118 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43025-0x10197ce54a90001, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-12-02T15:25:18,118 DEBUG [pool-69-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45405-0x10197ce54a90002, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-12-02T15:25:18,118 DEBUG [Time-limited test-EventThread {}] 
zookeeper.ZKWatcher(609): regionserver:35155-0x10197ce54a90003, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T15:25:18,118 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34441-0x10197ce54a90000, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-12-02T15:25:18,118 DEBUG [pool-69-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45405-0x10197ce54a90002, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T15:25:18,118 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43025-0x10197ce54a90001, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T15:25:18,118 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34441-0x10197ce54a90000, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T15:25:18,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=230 2024-12-02T15:25:18,119 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-02T15:25:18,120 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-02T15:25:18,120 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-02T15:25:18,120 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-02T15:25:18,120 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportWithChecksum 2024-12-02T15:25:18,120 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportWithChecksum completed 2024-12-02T15:25:18,127 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportWithChecksum" type: DISABLED 2024-12-02T15:25:18,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testExportWithChecksum 2024-12-02T15:25:18,131 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportWithChecksum" type: DISABLED 2024-12-02T15:25:18,132 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportWithChecksum 2024-12-02T15:25:18,155 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestSecureExportSnapshot#testExportWithChecksum Thread=814 (was 821), OpenFileDescriptor=816 (was 821), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=767 (was 548) - SystemLoadAverage LEAK? -, ProcessCount=23 (was 19) - ProcessCount LEAK? -, AvailableMemoryMB=875 (was 1047) 2024-12-02T15:25:18,155 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=814 is superior to 500 2024-12-02T15:25:18,172 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestSecureExportSnapshot#testExportFileSystemStateWithSkipTmp Thread=814, OpenFileDescriptor=816, MaxFileDescriptor=1048576, SystemLoadAverage=767, ProcessCount=23, AvailableMemoryMB=874 2024-12-02T15:25:18,172 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=814 is superior to 500 2024-12-02T15:25:18,174 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testExportFileSystemStateWithSkipTmp', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-02T15:25:18,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] procedure2.ProcedureExecutor(1139): Stored pid=231, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-12-02T15:25:18,176 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=231, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp execute state=CREATE_TABLE_PRE_OPERATION 2024-12-02T15:25:18,176 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T15:25:18,176 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportFileSystemStateWithSkipTmp" procId is: 231 2024-12-02T15:25:18,176 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=231, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-02T15:25:18,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=231 2024-12-02T15:25:18,183 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742362_1538 (size=418) 2024-12-02T15:25:18,184 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742362_1538 (size=418) 2024-12-02T15:25:18,184 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742362_1538 
(size=418) 2024-12-02T15:25:18,186 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => a570bf33c6368beb80e3083d8a523164, NAME => 'testtb-testExportFileSystemStateWithSkipTmp,,1733153118173.a570bf33c6368beb80e3083d8a523164.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportFileSystemStateWithSkipTmp', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657 2024-12-02T15:25:18,187 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => 4e2f4d4a8d7c58d9e796d0b446a7db4b, NAME => 'testtb-testExportFileSystemStateWithSkipTmp,1,1733153118173.4e2f4d4a8d7c58d9e796d0b446a7db4b.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportFileSystemStateWithSkipTmp', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657 2024-12-02T15:25:18,198 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742363_1539 (size=79) 2024-12-02T15:25:18,199 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742363_1539 (size=79) 2024-12-02T15:25:18,199 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742363_1539 (size=79) 2024-12-02T15:25:18,199 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742364_1540 (size=79) 2024-12-02T15:25:18,199 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742364_1540 (size=79) 2024-12-02T15:25:18,199 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742364_1540 (size=79) 2024-12-02T15:25:18,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=231 2024-12-02T15:25:18,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=231 2024-12-02T15:25:18,598 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithSkipTmp,,1733153118173.a570bf33c6368beb80e3083d8a523164.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now 
disable 2024-12-02T15:25:18,598 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithSkipTmp,1,1733153118173.4e2f4d4a8d7c58d9e796d0b446a7db4b.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T15:25:18,599 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1722): Closing a570bf33c6368beb80e3083d8a523164, disabling compactions & flushes 2024-12-02T15:25:18,599 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithSkipTmp,,1733153118173.a570bf33c6368beb80e3083d8a523164. 2024-12-02T15:25:18,599 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1722): Closing 4e2f4d4a8d7c58d9e796d0b446a7db4b, disabling compactions & flushes 2024-12-02T15:25:18,599 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithSkipTmp,,1733153118173.a570bf33c6368beb80e3083d8a523164. 2024-12-02T15:25:18,599 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithSkipTmp,1,1733153118173.4e2f4d4a8d7c58d9e796d0b446a7db4b. 2024-12-02T15:25:18,599 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithSkipTmp,,1733153118173.a570bf33c6368beb80e3083d8a523164. after waiting 0 ms 2024-12-02T15:25:18,599 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithSkipTmp,1,1733153118173.4e2f4d4a8d7c58d9e796d0b446a7db4b. 2024-12-02T15:25:18,599 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithSkipTmp,,1733153118173.a570bf33c6368beb80e3083d8a523164. 2024-12-02T15:25:18,599 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithSkipTmp,1,1733153118173.4e2f4d4a8d7c58d9e796d0b446a7db4b. after waiting 0 ms 2024-12-02T15:25:18,599 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithSkipTmp,,1733153118173.a570bf33c6368beb80e3083d8a523164. 2024-12-02T15:25:18,599 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithSkipTmp,1,1733153118173.4e2f4d4a8d7c58d9e796d0b446a7db4b. 2024-12-02T15:25:18,599 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithSkipTmp,1,1733153118173.4e2f4d4a8d7c58d9e796d0b446a7db4b. 
2024-12-02T15:25:18,599 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1676): Region close journal for a570bf33c6368beb80e3083d8a523164: Waiting for close lock at 1733153118599Disabling compacts and flushes for region at 1733153118599Disabling writes for close at 1733153118599Writing region close event to WAL at 1733153118599Closed at 1733153118599 2024-12-02T15:25:18,599 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1676): Region close journal for 4e2f4d4a8d7c58d9e796d0b446a7db4b: Waiting for close lock at 1733153118599Disabling compacts and flushes for region at 1733153118599Disabling writes for close at 1733153118599Writing region close event to WAL at 1733153118599Closed at 1733153118599 2024-12-02T15:25:18,602 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=231, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp execute state=CREATE_TABLE_ADD_TO_META 2024-12-02T15:25:18,603 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithSkipTmp,,1733153118173.a570bf33c6368beb80e3083d8a523164.","families":{"info":[{"qualifier":"regioninfo","vlen":78,"tag":[],"timestamp":"1733153118602"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733153118602"}]},"ts":"1733153118602"} 2024-12-02T15:25:18,603 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithSkipTmp,1,1733153118173.4e2f4d4a8d7c58d9e796d0b446a7db4b.","families":{"info":[{"qualifier":"regioninfo","vlen":78,"tag":[],"timestamp":"1733153118602"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733153118602"}]},"ts":"1733153118602"} 2024-12-02T15:25:18,607 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 
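
The CreateTableProcedure traced above (pid=231) builds testtb-testExportFileSystemStateWithSkipTmp with a single 'cf' family and two regions split at '1' (STARTKEY ''..'1' and '1'..''). A rough client-side equivalent, offered as a sketch rather than the test's actual code; the family attributes shown in the log are the defaults, so only VERSIONS=1 needs to be set explicitly:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateTableSketch {
      public static void main(String[] args) throws Exception {
        TableName tn = TableName.valueOf("testtb-testExportFileSystemStateWithSkipTmp");
        TableDescriptorBuilder tdb = TableDescriptorBuilder.newBuilder(tn)
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
                .setMaxVersions(1)     // VERSIONS => '1' as printed in the descriptor above
                .build());
        byte[][] splitKeys = { Bytes.toBytes("1") };   // yields the two regions seen above
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          admin.createTable(tdb.build(), splitKeys);   // master runs CreateTableProcedure (pid=231)
        }
      }
    }
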
2024-12-02T15:25:18,608 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=231, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-02T15:25:18,608 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733153118608"}]},"ts":"1733153118608"} 2024-12-02T15:25:18,609 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithSkipTmp, state=ENABLING in hbase:meta 2024-12-02T15:25:18,609 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(204): Hosts are {be0458f36726=0} racks are {/default-rack=0} 2024-12-02T15:25:18,610 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-02T15:25:18,610 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-02T15:25:18,610 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-02T15:25:18,610 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-02T15:25:18,610 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-02T15:25:18,610 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-02T15:25:18,610 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-02T15:25:18,610 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-02T15:25:18,610 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-02T15:25:18,610 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-02T15:25:18,611 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=232, ppid=231, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=a570bf33c6368beb80e3083d8a523164, ASSIGN}, {pid=233, ppid=231, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=4e2f4d4a8d7c58d9e796d0b446a7db4b, ASSIGN}] 2024-12-02T15:25:18,611 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=233, ppid=231, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=4e2f4d4a8d7c58d9e796d0b446a7db4b, ASSIGN 2024-12-02T15:25:18,612 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=232, ppid=231, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=a570bf33c6368beb80e3083d8a523164, ASSIGN 2024-12-02T15:25:18,612 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=233, ppid=231, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=4e2f4d4a8d7c58d9e796d0b446a7db4b, ASSIGN; state=OFFLINE, location=be0458f36726,35155,1733152773848; forceNewPlan=false, retain=false 2024-12-02T15:25:18,612 INFO 
[PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=232, ppid=231, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=a570bf33c6368beb80e3083d8a523164, ASSIGN; state=OFFLINE, location=be0458f36726,43025,1733152773677; forceNewPlan=false, retain=false 2024-12-02T15:25:18,763 INFO [be0458f36726:34441 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 2024-12-02T15:25:18,764 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=232 updating hbase:meta row=a570bf33c6368beb80e3083d8a523164, regionState=OPENING, regionLocation=be0458f36726,43025,1733152773677 2024-12-02T15:25:18,764 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=233 updating hbase:meta row=4e2f4d4a8d7c58d9e796d0b446a7db4b, regionState=OPENING, regionLocation=be0458f36726,35155,1733152773848 2024-12-02T15:25:18,768 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=233, ppid=231, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=4e2f4d4a8d7c58d9e796d0b446a7db4b, ASSIGN because future has completed 2024-12-02T15:25:18,769 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=234, ppid=233, state=RUNNABLE, hasLock=false; OpenRegionProcedure 4e2f4d4a8d7c58d9e796d0b446a7db4b, server=be0458f36726,35155,1733152773848}] 2024-12-02T15:25:18,770 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=232, ppid=231, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=a570bf33c6368beb80e3083d8a523164, ASSIGN because future has completed 2024-12-02T15:25:18,771 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=235, ppid=232, state=RUNNABLE, hasLock=false; OpenRegionProcedure a570bf33c6368beb80e3083d8a523164, server=be0458f36726,43025,1733152773677}] 2024-12-02T15:25:18,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=231 2024-12-02T15:25:18,928 INFO [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=235}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithSkipTmp,,1733153118173.a570bf33c6368beb80e3083d8a523164. 2024-12-02T15:25:18,928 INFO [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=234}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithSkipTmp,1,1733153118173.4e2f4d4a8d7c58d9e796d0b446a7db4b. 
2024-12-02T15:25:18,928 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=235}] regionserver.HRegion(7752): Opening region: {ENCODED => a570bf33c6368beb80e3083d8a523164, NAME => 'testtb-testExportFileSystemStateWithSkipTmp,,1733153118173.a570bf33c6368beb80e3083d8a523164.', STARTKEY => '', ENDKEY => '1'} 2024-12-02T15:25:18,928 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=234}] regionserver.HRegion(7752): Opening region: {ENCODED => 4e2f4d4a8d7c58d9e796d0b446a7db4b, NAME => 'testtb-testExportFileSystemStateWithSkipTmp,1,1733153118173.4e2f4d4a8d7c58d9e796d0b446a7db4b.', STARTKEY => '1', ENDKEY => ''} 2024-12-02T15:25:18,928 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=234}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithSkipTmp,1,1733153118173.4e2f4d4a8d7c58d9e796d0b446a7db4b. service=AccessControlService 2024-12-02T15:25:18,929 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=235}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithSkipTmp,,1733153118173.a570bf33c6368beb80e3083d8a523164. service=AccessControlService 2024-12-02T15:25:18,929 INFO [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=234}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-02T15:25:18,929 INFO [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=235}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
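
The "Registered coprocessor service ... service=AccessControlService" and "System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded" lines reflect the secure cluster configuration used by this test: authorization is enabled and AccessController is wired in as a coprocessor on the master, regions, and regionservers. A hedged sketch of that configuration; the property keys are the standard HBase ones, though the exact wiring inside the test harness may differ:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class SecureClusterConfSketch {
      public static Configuration secureConf() {
        Configuration conf = HBaseConfiguration.create();
        String ac = "org.apache.hadoop.hbase.security.access.AccessController";
        conf.setBoolean("hbase.security.authorization", true);   // enable ACL checks
        conf.set("hbase.coprocessor.master.classes", ac);        // master-side hooks
        conf.set("hbase.coprocessor.region.classes", ac);        // per-region hooks (loaded on open, as above)
        conf.set("hbase.coprocessor.regionserver.classes", ac);  // regionserver-wide hooks
        return conf;
      }
    }
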
2024-12-02T15:25:18,929 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=235}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithSkipTmp a570bf33c6368beb80e3083d8a523164 2024-12-02T15:25:18,929 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=234}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithSkipTmp 4e2f4d4a8d7c58d9e796d0b446a7db4b 2024-12-02T15:25:18,929 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=235}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithSkipTmp,,1733153118173.a570bf33c6368beb80e3083d8a523164.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T15:25:18,929 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=234}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithSkipTmp,1,1733153118173.4e2f4d4a8d7c58d9e796d0b446a7db4b.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-02T15:25:18,929 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=235}] regionserver.HRegion(7794): checking encryption for a570bf33c6368beb80e3083d8a523164 2024-12-02T15:25:18,929 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=234}] regionserver.HRegion(7794): checking encryption for 4e2f4d4a8d7c58d9e796d0b446a7db4b 2024-12-02T15:25:18,929 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=235}] regionserver.HRegion(7797): checking classloading for a570bf33c6368beb80e3083d8a523164 2024-12-02T15:25:18,929 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=234}] regionserver.HRegion(7797): checking classloading for 4e2f4d4a8d7c58d9e796d0b446a7db4b 2024-12-02T15:25:18,931 INFO [StoreOpener-a570bf33c6368beb80e3083d8a523164-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region a570bf33c6368beb80e3083d8a523164 2024-12-02T15:25:18,931 INFO [StoreOpener-4e2f4d4a8d7c58d9e796d0b446a7db4b-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 4e2f4d4a8d7c58d9e796d0b446a7db4b 2024-12-02T15:25:18,933 INFO [StoreOpener-4e2f4d4a8d7c58d9e796d0b446a7db4b-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 4e2f4d4a8d7c58d9e796d0b446a7db4b columnFamilyName cf 2024-12-02T15:25:18,933 INFO [StoreOpener-a570bf33c6368beb80e3083d8a523164-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region a570bf33c6368beb80e3083d8a523164 columnFamilyName cf 2024-12-02T15:25:18,933 DEBUG [StoreOpener-4e2f4d4a8d7c58d9e796d0b446a7db4b-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T15:25:18,933 DEBUG [StoreOpener-a570bf33c6368beb80e3083d8a523164-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-02T15:25:18,933 INFO [StoreOpener-a570bf33c6368beb80e3083d8a523164-1 {}] regionserver.HStore(327): Store=a570bf33c6368beb80e3083d8a523164/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T15:25:18,933 INFO [StoreOpener-4e2f4d4a8d7c58d9e796d0b446a7db4b-1 {}] regionserver.HStore(327): Store=4e2f4d4a8d7c58d9e796d0b446a7db4b/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-02T15:25:18,933 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=235}] regionserver.HRegion(1038): replaying wal for a570bf33c6368beb80e3083d8a523164 2024-12-02T15:25:18,933 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=234}] regionserver.HRegion(1038): replaying wal for 4e2f4d4a8d7c58d9e796d0b446a7db4b 2024-12-02T15:25:18,934 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=235}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportFileSystemStateWithSkipTmp/a570bf33c6368beb80e3083d8a523164 2024-12-02T15:25:18,934 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=234}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportFileSystemStateWithSkipTmp/4e2f4d4a8d7c58d9e796d0b446a7db4b 2024-12-02T15:25:18,934 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=235}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportFileSystemStateWithSkipTmp/a570bf33c6368beb80e3083d8a523164 2024-12-02T15:25:18,935 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 
{event_type=M_RS_OPEN_REGION, pid=234}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportFileSystemStateWithSkipTmp/4e2f4d4a8d7c58d9e796d0b446a7db4b 2024-12-02T15:25:18,935 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=235}] regionserver.HRegion(1048): stopping wal replay for a570bf33c6368beb80e3083d8a523164 2024-12-02T15:25:18,935 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=235}] regionserver.HRegion(1060): Cleaning up temporary data for a570bf33c6368beb80e3083d8a523164 2024-12-02T15:25:18,935 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=234}] regionserver.HRegion(1048): stopping wal replay for 4e2f4d4a8d7c58d9e796d0b446a7db4b 2024-12-02T15:25:18,935 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=234}] regionserver.HRegion(1060): Cleaning up temporary data for 4e2f4d4a8d7c58d9e796d0b446a7db4b 2024-12-02T15:25:18,936 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=235}] regionserver.HRegion(1093): writing seq id for a570bf33c6368beb80e3083d8a523164 2024-12-02T15:25:18,936 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=234}] regionserver.HRegion(1093): writing seq id for 4e2f4d4a8d7c58d9e796d0b446a7db4b 2024-12-02T15:25:18,937 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=235}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportFileSystemStateWithSkipTmp/a570bf33c6368beb80e3083d8a523164/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-02T15:25:18,938 INFO [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=235}] regionserver.HRegion(1114): Opened a570bf33c6368beb80e3083d8a523164; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=62937585, jitterRate=-0.06215690076351166}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-02T15:25:18,938 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=235}] regionserver.HRegion(1122): Running coprocessor post-open hooks for a570bf33c6368beb80e3083d8a523164 2024-12-02T15:25:18,938 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=234}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportFileSystemStateWithSkipTmp/4e2f4d4a8d7c58d9e796d0b446a7db4b/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-02T15:25:18,938 INFO [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=234}] regionserver.HRegion(1114): Opened 4e2f4d4a8d7c58d9e796d0b446a7db4b; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=60733385, jitterRate=-0.09500204026699066}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-02T15:25:18,938 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=234}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 4e2f4d4a8d7c58d9e796d0b446a7db4b 
2024-12-02T15:25:18,938 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=235}] regionserver.HRegion(1006): Region open journal for a570bf33c6368beb80e3083d8a523164: Running coprocessor pre-open hook at 1733153118929Writing region info on filesystem at 1733153118929Initializing all the Stores at 1733153118930 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733153118930Cleaning up temporary data from old regions at 1733153118935 (+5 ms)Running coprocessor post-open hooks at 1733153118938 (+3 ms)Region opened successfully at 1733153118938 2024-12-02T15:25:18,938 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=234}] regionserver.HRegion(1006): Region open journal for 4e2f4d4a8d7c58d9e796d0b446a7db4b: Running coprocessor pre-open hook at 1733153118929Writing region info on filesystem at 1733153118929Initializing all the Stores at 1733153118930 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733153118930Cleaning up temporary data from old regions at 1733153118935 (+5 ms)Running coprocessor post-open hooks at 1733153118938 (+3 ms)Region opened successfully at 1733153118938 2024-12-02T15:25:18,939 INFO [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=234}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemStateWithSkipTmp,1,1733153118173.4e2f4d4a8d7c58d9e796d0b446a7db4b., pid=234, masterSystemTime=1733153118923 2024-12-02T15:25:18,939 INFO [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=235}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemStateWithSkipTmp,,1733153118173.a570bf33c6368beb80e3083d8a523164., pid=235, masterSystemTime=1733153118923 2024-12-02T15:25:18,940 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=235}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithSkipTmp,,1733153118173.a570bf33c6368beb80e3083d8a523164. 2024-12-02T15:25:18,940 INFO [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=235}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithSkipTmp,,1733153118173.a570bf33c6368beb80e3083d8a523164. 2024-12-02T15:25:18,941 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=232 updating hbase:meta row=a570bf33c6368beb80e3083d8a523164, regionState=OPEN, openSeqNum=2, regionLocation=be0458f36726,43025,1733152773677 2024-12-02T15:25:18,941 DEBUG [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=234}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithSkipTmp,1,1733153118173.4e2f4d4a8d7c58d9e796d0b446a7db4b. 
2024-12-02T15:25:18,941 INFO [RS_OPEN_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_OPEN_REGION, pid=234}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithSkipTmp,1,1733153118173.4e2f4d4a8d7c58d9e796d0b446a7db4b. 2024-12-02T15:25:18,941 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=233 updating hbase:meta row=4e2f4d4a8d7c58d9e796d0b446a7db4b, regionState=OPEN, openSeqNum=2, regionLocation=be0458f36726,35155,1733152773848 2024-12-02T15:25:18,942 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=235, ppid=232, state=RUNNABLE, hasLock=false; OpenRegionProcedure a570bf33c6368beb80e3083d8a523164, server=be0458f36726,43025,1733152773677 because future has completed 2024-12-02T15:25:18,943 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=234, ppid=233, state=RUNNABLE, hasLock=false; OpenRegionProcedure 4e2f4d4a8d7c58d9e796d0b446a7db4b, server=be0458f36726,35155,1733152773848 because future has completed 2024-12-02T15:25:18,944 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=235, resume processing ppid=232 2024-12-02T15:25:18,944 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=235, ppid=232, state=SUCCESS, hasLock=false; OpenRegionProcedure a570bf33c6368beb80e3083d8a523164, server=be0458f36726,43025,1733152773677 in 172 msec 2024-12-02T15:25:18,945 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=234, resume processing ppid=233 2024-12-02T15:25:18,945 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=234, ppid=233, state=SUCCESS, hasLock=false; OpenRegionProcedure 4e2f4d4a8d7c58d9e796d0b446a7db4b, server=be0458f36726,35155,1733152773848 in 175 msec 2024-12-02T15:25:18,945 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=232, ppid=231, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=a570bf33c6368beb80e3083d8a523164, ASSIGN in 333 msec 2024-12-02T15:25:18,946 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=233, resume processing ppid=231 2024-12-02T15:25:18,946 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=233, ppid=231, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=4e2f4d4a8d7c58d9e796d0b446a7db4b, ASSIGN in 334 msec 2024-12-02T15:25:18,947 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=231, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-02T15:25:18,947 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733153118947"}]},"ts":"1733153118947"} 2024-12-02T15:25:18,948 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithSkipTmp, state=ENABLED in hbase:meta 2024-12-02T15:25:18,949 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=231, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp execute state=CREATE_TABLE_POST_OPERATION 
2024-12-02T15:25:18,949 DEBUG [PEWorker-1 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportFileSystemStateWithSkipTmp jenkins: RWXCA 2024-12-02T15:25:18,951 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43025 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithSkipTmp], kv [jenkins: RWXCA] 2024-12-02T15:25:18,985 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35155-0x10197ce54a90003, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T15:25:18,985 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43025-0x10197ce54a90001, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T15:25:18,985 DEBUG [pool-69-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45405-0x10197ce54a90002, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T15:25:18,985 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34441-0x10197ce54a90000, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T15:25:18,994 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-02T15:25:18,994 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-02T15:25:18,994 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-02T15:25:18,994 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-02T15:25:18,995 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF\x0AQ\x0A\x07jenkins\x12F\x08\x03"B\x0A6\x0A\x07default\x12+testtb-testExportFileSystemStateWithSkipTmp \x00 \x01 \x02 \x03 \x04 2024-12-02T15:25:18,995 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF\x0AQ\x0A\x07jenkins\x12F\x08\x03"B\x0A6\x0A\x07default\x12+testtb-testExportFileSystemStateWithSkipTmp \x00 \x01 \x02 \x03 \x04 2024-12-02T15:25:18,995 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF\x0AQ\x0A\x07jenkins\x12F\x08\x03"B\x0A6\x0A\x07default\x12+testtb-testExportFileSystemStateWithSkipTmp \x00 \x01 \x02 \x03 \x04 2024-12-02T15:25:18,995 DEBUG [zk-permission-watcher-pool-0 {}] 
access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF\x0AQ\x0A\x07jenkins\x12F\x08\x03"B\x0A6\x0A\x07default\x12+testtb-testExportFileSystemStateWithSkipTmp \x00 \x01 \x02 \x03 \x04 2024-12-02T15:25:18,997 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=231, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp in 819 msec 2024-12-02T15:25:19,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=231 2024-12-02T15:25:19,315 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportFileSystemStateWithSkipTmp completed 2024-12-02T15:25:19,315 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table testtb-testExportFileSystemStateWithSkipTmp get assigned. Timeout = 60000ms 2024-12-02T15:25:19,316 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-02T15:25:19,324 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=45405 {}] regionserver.StoreScanner(1138): Switch to stream read (scanned=32844 bytes) of info 2024-12-02T15:25:19,326 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table testtb-testExportFileSystemStateWithSkipTmp assigned to meta. Checking AM states. 2024-12-02T15:25:19,326 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-02T15:25:19,326 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table testtb-testExportFileSystemStateWithSkipTmp assigned. 2024-12-02T15:25:19,326 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSkipTmp,, stopping at row=testtb-testExportFileSystemStateWithSkipTmp ,, for max=2147483647 with caching=100 2024-12-02T15:25:19,329 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } 2024-12-02T15:25:19,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733153119329 (current time:1733153119329). 
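
The "Writing permission with rowKey ... jenkins: RWXCA" entry and the ZKPermissionWatcher cache updates above are the ACL bookkeeping for the new table; here the RWXCA entry is written for the table owner as part of the create itself. An explicit grant that would produce the same kind of hbase:acl entry is sketched below, assuming an open Connection 'conn'; this is illustrative, not necessarily how the test assigns permissions:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.security.access.AccessControlClient;
    import org.apache.hadoop.hbase.security.access.Permission;

    public class GrantSketch {
      // Table-wide grant (family/qualifier null), matching the "jenkins: RWXCA" entry
      // that PermissionStorage records and ZKPermissionWatcher fans out above.
      static void grantAll(Connection conn) throws Throwable {
        AccessControlClient.grant(conn,
            TableName.valueOf("testtb-testExportFileSystemStateWithSkipTmp"),
            "jenkins", null, null,
            Permission.Action.READ, Permission.Action.WRITE, Permission.Action.EXEC,
            Permission.Action.CREATE, Permission.Action.ADMIN);
      }
    }
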
2024-12-02T15:25:19,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-02T15:25:19,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testExportFileSystemStateWithSkipTmp VERSION not specified, setting to 2 2024-12-02T15:25:19,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-02T15:25:19,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2c92ede4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T15:25:19,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] client.ClusterIdFetcher(90): Going to request be0458f36726,34441,-1 for getting cluster id 2024-12-02T15:25:19,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-02T15:25:19,330 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '8e50677f-f1c7-447a-8806-ffbc6b162076' 2024-12-02T15:25:19,331 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-02T15:25:19,331 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "8e50677f-f1c7-447a-8806-ffbc6b162076" 2024-12-02T15:25:19,331 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1a831842, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T15:25:19,331 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [be0458f36726,34441,-1] 2024-12-02T15:25:19,331 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-02T15:25:19,331 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T15:25:19,332 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43876, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-02T15:25:19,332 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6c6428b2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T15:25:19,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-02T15:25:19,333 DEBUG 
[MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=be0458f36726,45405,1733152773787, seqNum=-1] 2024-12-02T15:25:19,333 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T15:25:19,334 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36568, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T15:25:19,335 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441. 2024-12-02T15:25:19,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-02T15:25:19,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T15:25:19,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T15:25:19,335 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-12-02T15:25:19,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1b189cc1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T15:25:19,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] client.ClusterIdFetcher(90): Going to request be0458f36726,34441,-1 for getting cluster id 2024-12-02T15:25:19,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-02T15:25:19,337 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '8e50677f-f1c7-447a-8806-ffbc6b162076' 2024-12-02T15:25:19,337 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-02T15:25:19,337 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "8e50677f-f1c7-447a-8806-ffbc6b162076" 2024-12-02T15:25:19,337 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@565cddb8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T15:25:19,337 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [be0458f36726,34441,-1] 2024-12-02T15:25:19,337 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-02T15:25:19,337 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T15:25:19,338 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43892, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-02T15:25:19,338 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1178566e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T15:25:19,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-02T15:25:19,340 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=be0458f36726,45405,1733152773787, seqNum=-1] 2024-12-02T15:25:19,340 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T15:25:19,340 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36580, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
2024-12-02T15:25:19,342 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemStateWithSkipTmp', locateType=CURRENT is [region=hbase:acl,,1733152776657.82c95700d9ff2efea2507bd84223c4b0., hostname=be0458f36726,43025,1733152773677, seqNum=2] 2024-12-02T15:25:19,342 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T15:25:19,343 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35870, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T15:25:19,344 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441. 2024-12-02T15:25:19,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor255.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-02T15:25:19,344 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T15:25:19,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T15:25:19,344 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-02T15:25:19,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithSkipTmp], kv [jenkins: RWXCA] 2024-12-02T15:25:19,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-12-02T15:25:19,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] procedure2.ProcedureExecutor(1139): Stored pid=236, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=236, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } 2024-12-02T15:25:19,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 }, snapshot procedure id = 236 2024-12-02T15:25:19,346 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=236, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=236, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-02T15:25:19,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=236 2024-12-02T15:25:19,347 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=236, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=236, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-02T15:25:19,349 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=236, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=236, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-02T15:25:19,353 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742365_1541 (size=203) 2024-12-02T15:25:19,353 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742365_1541 (size=203) 2024-12-02T15:25:19,353 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742365_1541 (size=203) 2024-12-02T15:25:19,354 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=236, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=236, snapshot={ 
ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-02T15:25:19,354 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=237, ppid=236, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure a570bf33c6368beb80e3083d8a523164}, {pid=238, ppid=236, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 4e2f4d4a8d7c58d9e796d0b446a7db4b}] 2024-12-02T15:25:19,355 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=238, ppid=236, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 4e2f4d4a8d7c58d9e796d0b446a7db4b 2024-12-02T15:25:19,355 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=237, ppid=236, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure a570bf33c6368beb80e3083d8a523164 2024-12-02T15:25:19,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=236 2024-12-02T15:25:19,508 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35155 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=238 2024-12-02T15:25:19,508 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43025 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=237 2024-12-02T15:25:19,509 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=237}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,,1733153118173.a570bf33c6368beb80e3083d8a523164. 2024-12-02T15:25:19,509 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=238}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,1,1733153118173.4e2f4d4a8d7c58d9e796d0b446a7db4b. 2024-12-02T15:25:19,509 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=238}] regionserver.HRegion(2603): Flush status journal for 4e2f4d4a8d7c58d9e796d0b446a7db4b: 2024-12-02T15:25:19,509 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=237}] regionserver.HRegion(2603): Flush status journal for a570bf33c6368beb80e3083d8a523164: 2024-12-02T15:25:19,510 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=238}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithSkipTmp,1,1733153118173.4e2f4d4a8d7c58d9e796d0b446a7db4b. for emptySnaptb0-testExportFileSystemStateWithSkipTmp completed. 2024-12-02T15:25:19,510 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=237}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithSkipTmp,,1733153118173.a570bf33c6368beb80e3083d8a523164. for emptySnaptb0-testExportFileSystemStateWithSkipTmp completed. 2024-12-02T15:25:19,510 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=237}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithSkipTmp,,1733153118173.a570bf33c6368beb80e3083d8a523164.' 
region-info for snapshot=emptySnaptb0-testExportFileSystemStateWithSkipTmp 2024-12-02T15:25:19,510 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=238}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithSkipTmp,1,1733153118173.4e2f4d4a8d7c58d9e796d0b446a7db4b.' region-info for snapshot=emptySnaptb0-testExportFileSystemStateWithSkipTmp 2024-12-02T15:25:19,510 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=237}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-02T15:25:19,510 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=238}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-02T15:25:19,510 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=238}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-02T15:25:19,510 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=237}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-02T15:25:19,517 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742366_1542 (size=82) 2024-12-02T15:25:19,517 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742366_1542 (size=82) 2024-12-02T15:25:19,517 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742367_1543 (size=82) 2024-12-02T15:25:19,517 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742366_1542 (size=82) 2024-12-02T15:25:19,517 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742367_1543 (size=82) 2024-12-02T15:25:19,517 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742367_1543 (size=82) 2024-12-02T15:25:19,518 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=237}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,,1733153118173.a570bf33c6368beb80e3083d8a523164. 2024-12-02T15:25:19,518 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=238}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,1,1733153118173.4e2f4d4a8d7c58d9e796d0b446a7db4b. 
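The FLUSH snapshot requested for emptySnaptb0-testExportFileSystemStateWithSkipTmp is driven by the master-side SnapshotProcedure whose SNAPSHOT_PREPARE, SNAPSHOT_PRE_OPERATION, SNAPSHOT_WRITE_SNAPSHOT_INFO and SNAPSHOT_SNAPSHOT_ONLINE_REGIONS states appear above, with one SnapshotRegionProcedure per region. A minimal, hypothetical sketch of the client call that triggers such a request (snapshot and table names taken from the log; not the test's actual code) is:

```java
// Hypothetical sketch of the client call behind the
// "snapshot request for:{ ss=emptySnaptb0-... type=FLUSH ttl=0 }" lines.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotType;

public class TakeFlushSnapshot {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      // Blocks until the master-side SnapshotProcedure reports completion;
      // the repeated "Checking to see if procedure is done" lines are that polling.
      admin.snapshot("emptySnaptb0-testExportFileSystemStateWithSkipTmp",
          TableName.valueOf("testtb-testExportFileSystemStateWithSkipTmp"),
          SnapshotType.FLUSH);
    }
  }
}
```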
2024-12-02T15:25:19,518 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=237}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=237 2024-12-02T15:25:19,518 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=238}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=238 2024-12-02T15:25:19,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] master.HMaster(4169): Remote procedure done, pid=237 2024-12-02T15:25:19,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.HMaster(4169): Remote procedure done, pid=238 2024-12-02T15:25:19,519 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemStateWithSkipTmp on region a570bf33c6368beb80e3083d8a523164 2024-12-02T15:25:19,519 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemStateWithSkipTmp on region 4e2f4d4a8d7c58d9e796d0b446a7db4b 2024-12-02T15:25:19,519 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=238, ppid=236, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 4e2f4d4a8d7c58d9e796d0b446a7db4b 2024-12-02T15:25:19,519 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=237, ppid=236, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure a570bf33c6368beb80e3083d8a523164 2024-12-02T15:25:19,521 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=238, ppid=236, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 4e2f4d4a8d7c58d9e796d0b446a7db4b in 166 msec 2024-12-02T15:25:19,522 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=237, resume processing ppid=236 2024-12-02T15:25:19,522 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=237, ppid=236, state=SUCCESS, hasLock=false; SnapshotRegionProcedure a570bf33c6368beb80e3083d8a523164 in 166 msec 2024-12-02T15:25:19,522 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=236, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=236, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-02T15:25:19,523 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=236, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=236, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-02T15:25:19,523 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=236, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=236, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-02T15:25:19,523 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportFileSystemStateWithSkipTmp 2024-12-02T15:25:19,524 
DEBUG [PEWorker-2 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemStateWithSkipTmp 2024-12-02T15:25:19,536 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742368_1544 (size=585) 2024-12-02T15:25:19,536 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742368_1544 (size=585) 2024-12-02T15:25:19,536 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742368_1544 (size=585) 2024-12-02T15:25:19,538 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=236, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=236, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-02T15:25:19,542 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=236, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=236, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-02T15:25:19,542 DEBUG [PEWorker-2 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemStateWithSkipTmp to hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/.hbase-snapshot/emptySnaptb0-testExportFileSystemStateWithSkipTmp 2024-12-02T15:25:19,544 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=236, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=236, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-02T15:25:19,544 DEBUG [PEWorker-2 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 }, snapshot procedure id = 236 2024-12-02T15:25:19,545 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=236, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=236, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } in 199 msec 2024-12-02T15:25:19,570 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportFileSystemStateWithSkipTmp' 2024-12-02T15:25:19,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=236 2024-12-02T15:25:19,664 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemStateWithSkipTmp completed 2024-12-02T15:25:19,671 DEBUG 
[RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithSkipTmp', row='08a0c113138785ff034b7f685a7540529', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithSkipTmp,,1733153118173.a570bf33c6368beb80e3083d8a523164., hostname=be0458f36726,43025,1733152773677, seqNum=2] 2024-12-02T15:25:19,673 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithSkipTmp', row='1b4d8ce5cf07799410c3ca039cbe4c7bf', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithSkipTmp,1,1733153118173.4e2f4d4a8d7c58d9e796d0b446a7db4b., hostname=be0458f36726,35155,1733152773848, seqNum=2] 2024-12-02T15:25:19,673 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithSkipTmp', row='2aa908396ccf1c36707421c371c335e9a', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithSkipTmp,1,1733153118173.4e2f4d4a8d7c58d9e796d0b446a7db4b., hostname=be0458f36726,35155,1733152773848, seqNum=2] 2024-12-02T15:25:19,674 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithSkipTmp', row='3feaa9f1aa20454c2a430eaeb8799f722', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithSkipTmp,1,1733153118173.4e2f4d4a8d7c58d9e796d0b446a7db4b., hostname=be0458f36726,35155,1733152773848, seqNum=2] 2024-12-02T15:25:19,680 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43025 {}] regionserver.HRegion(8528): writing data to region testtb-testExportFileSystemStateWithSkipTmp,,1733153118173.a570bf33c6368beb80e3083d8a523164. with WAL disabled. Data may be lost in the event of a crash. 2024-12-02T15:25:19,681 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35155 {}] regionserver.HRegion(8528): writing data to region testtb-testExportFileSystemStateWithSkipTmp,1,1733153118173.4e2f4d4a8d7c58d9e796d0b446a7db4b. with WAL disabled. Data may be lost in the event of a crash. 2024-12-02T15:25:19,682 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSkipTmp,, stopping at row=testtb-testExportFileSystemStateWithSkipTmp ,, for max=2147483647 with caching=100 2024-12-02T15:25:19,684 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportFileSystemStateWithSkipTmp 2024-12-02T15:25:19,684 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportFileSystemStateWithSkipTmp,,1733153118173.a570bf33c6368beb80e3083d8a523164. 
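The two HRegion(8528) warnings above ("writing data to region ... with WAL disabled. Data may be lost in the event of a crash.") correspond to puts issued without write-ahead logging, a common shortcut in test data loaders. A hypothetical sketch of such a write follows; the row key and value are placeholders, while the cf:q column matches the keys visible later in the flush output:

```java
// Hypothetical sketch matching the "writing data ... with WAL disabled" warnings:
// a Put issued with SKIP_WAL durability. Not the test's actual loader code.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class LoadRowsSkipWal {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    TableName tableName = TableName.valueOf("testtb-testExportFileSystemStateWithSkipTmp");
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Table table = connection.getTable(tableName)) {
      Put put = new Put(Bytes.toBytes("row-0001"));                       // placeholder row
      put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value-0001"));
      // Skip the write-ahead log: faster, but the data is lost if the region server
      // crashes before the memstore is flushed -- hence the warning in the log.
      put.setDurability(Durability.SKIP_WAL);
      table.put(put);
    }
  }
}
```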
2024-12-02T15:25:19,684 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-02T15:25:19,686 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSkipTmp,, stopping at row=testtb-testExportFileSystemStateWithSkipTmp ,, for max=2147483647 with caching=100 2024-12-02T15:25:19,689 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSkipTmp,, stopping at row=testtb-testExportFileSystemStateWithSkipTmp ,, for max=2147483647 with caching=100 2024-12-02T15:25:19,693 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSkipTmp,, stopping at row=testtb-testExportFileSystemStateWithSkipTmp ,, for max=2147483647 with caching=100 2024-12-02T15:25:19,695 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } 2024-12-02T15:25:19,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1733153119695 (current time:1733153119695). 2024-12-02T15:25:19,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-02T15:25:19,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportFileSystemStateWithSkipTmp VERSION not specified, setting to 2 2024-12-02T15:25:19,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-12-02T15:25:19,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@8ed13f0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T15:25:19,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] client.ClusterIdFetcher(90): Going to request be0458f36726,34441,-1 for getting cluster id 2024-12-02T15:25:19,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-02T15:25:19,697 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '8e50677f-f1c7-447a-8806-ffbc6b162076' 2024-12-02T15:25:19,697 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-02T15:25:19,697 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "8e50677f-f1c7-447a-8806-ffbc6b162076" 2024-12-02T15:25:19,697 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7575063a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, 
connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T15:25:19,697 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [be0458f36726,34441,-1] 2024-12-02T15:25:19,698 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-02T15:25:19,698 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T15:25:19,698 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43916, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-02T15:25:19,699 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3cb8e273, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T15:25:19,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-02T15:25:19,700 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=be0458f36726,45405,1733152773787, seqNum=-1] 2024-12-02T15:25:19,700 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T15:25:19,701 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36584, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T15:25:19,702 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441. 
2024-12-02T15:25:19,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-02T15:25:19,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T15:25:19,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T15:25:19,702 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-02T15:25:19,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@17331598, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T15:25:19,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] client.ClusterIdFetcher(90): Going to request be0458f36726,34441,-1 for getting cluster id 2024-12-02T15:25:19,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-02T15:25:19,704 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '8e50677f-f1c7-447a-8806-ffbc6b162076' 2024-12-02T15:25:19,704 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-02T15:25:19,704 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "8e50677f-f1c7-447a-8806-ffbc6b162076" 2024-12-02T15:25:19,704 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4d7ee298, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T15:25:19,704 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [be0458f36726,34441,-1] 2024-12-02T15:25:19,704 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-02T15:25:19,705 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T15:25:19,705 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43922, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-02T15:25:19,706 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@420e7c01, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-02T15:25:19,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-02T15:25:19,707 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=be0458f36726,45405,1733152773787, seqNum=-1] 2024-12-02T15:25:19,707 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T15:25:19,708 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36590, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T15:25:19,709 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemStateWithSkipTmp', locateType=CURRENT is [region=hbase:acl,,1733152776657.82c95700d9ff2efea2507bd84223c4b0., hostname=be0458f36726,43025,1733152773677, seqNum=2] 2024-12-02T15:25:19,710 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-02T15:25:19,711 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35882, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-02T15:25:19,712 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441. 
2024-12-02T15:25:19,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor255.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-12-02T15:25:19,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T15:25:19,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T15:25:19,712 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-02T15:25:19,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithSkipTmp], kv [jenkins: RWXCA] 2024-12-02T15:25:19,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
2024-12-02T15:25:19,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] procedure2.ProcedureExecutor(1139): Stored pid=239, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=239, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } 2024-12-02T15:25:19,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 }, snapshot procedure id = 239 2024-12-02T15:25:19,715 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=239, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=239, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-02T15:25:19,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=239 2024-12-02T15:25:19,716 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=239, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=239, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-02T15:25:19,718 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=239, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=239, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-02T15:25:19,723 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742369_1545 (size=198) 2024-12-02T15:25:19,723 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742369_1545 (size=198) 2024-12-02T15:25:19,723 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742369_1545 (size=198) 2024-12-02T15:25:19,724 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=239, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=239, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-02T15:25:19,724 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=240, ppid=239, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure a570bf33c6368beb80e3083d8a523164}, {pid=241, ppid=239, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 4e2f4d4a8d7c58d9e796d0b446a7db4b}] 2024-12-02T15:25:19,725 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=240, ppid=239, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure a570bf33c6368beb80e3083d8a523164 2024-12-02T15:25:19,725 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=241, ppid=239, 
state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 4e2f4d4a8d7c58d9e796d0b446a7db4b 2024-12-02T15:25:19,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=239 2024-12-02T15:25:19,878 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43025 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=240 2024-12-02T15:25:19,878 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35155 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=241 2024-12-02T15:25:19,878 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=240}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,,1733153118173.a570bf33c6368beb80e3083d8a523164. 2024-12-02T15:25:19,878 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=241}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,1,1733153118173.4e2f4d4a8d7c58d9e796d0b446a7db4b. 2024-12-02T15:25:19,879 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=240}] regionserver.HRegion(2902): Flushing a570bf33c6368beb80e3083d8a523164 1/1 column families, dataSize=333 B heapSize=976 B 2024-12-02T15:25:19,879 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=241}] regionserver.HRegion(2902): Flushing 4e2f4d4a8d7c58d9e796d0b446a7db4b 1/1 column families, dataSize=2.93 KB heapSize=6.58 KB 2024-12-02T15:25:19,896 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=241}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportFileSystemStateWithSkipTmp/4e2f4d4a8d7c58d9e796d0b446a7db4b/.tmp/cf/39fce440bbfb411eb7821694ae84d5c9 is 71, key is 135475a7c3ff01b8ca0a52a2bf1b6626/cf:q/1733153119681/Put/seqid=0 2024-12-02T15:25:19,896 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=240}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportFileSystemStateWithSkipTmp/a570bf33c6368beb80e3083d8a523164/.tmp/cf/22ac802f66f54a2ebdec7a2eb7d0bb7b is 71, key is 0aaaf1bfbfa06df78e34531e1fe9f3a6/cf:q/1733153119680/Put/seqid=0 2024-12-02T15:25:19,903 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742371_1547 (size=5424) 2024-12-02T15:25:19,903 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742370_1546 (size=8188) 2024-12-02T15:25:19,903 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742370_1546 (size=8188) 2024-12-02T15:25:19,903 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742371_1547 (size=5424) 2024-12-02T15:25:19,903 INFO [Block report processor {}] blockmanagement.BlockManager(3777): 
BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742370_1546 (size=8188) 2024-12-02T15:25:19,903 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742371_1547 (size=5424) 2024-12-02T15:25:20,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=239 2024-12-02T15:25:20,302 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=240}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=333 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportFileSystemStateWithSkipTmp/a570bf33c6368beb80e3083d8a523164/.tmp/cf/22ac802f66f54a2ebdec7a2eb7d0bb7b 2024-12-02T15:25:20,302 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=241}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.93 KB at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportFileSystemStateWithSkipTmp/4e2f4d4a8d7c58d9e796d0b446a7db4b/.tmp/cf/39fce440bbfb411eb7821694ae84d5c9 2024-12-02T15:25:20,309 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=241}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportFileSystemStateWithSkipTmp/4e2f4d4a8d7c58d9e796d0b446a7db4b/.tmp/cf/39fce440bbfb411eb7821694ae84d5c9 as hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportFileSystemStateWithSkipTmp/4e2f4d4a8d7c58d9e796d0b446a7db4b/cf/39fce440bbfb411eb7821694ae84d5c9 2024-12-02T15:25:20,309 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=240}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportFileSystemStateWithSkipTmp/a570bf33c6368beb80e3083d8a523164/.tmp/cf/22ac802f66f54a2ebdec7a2eb7d0bb7b as hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportFileSystemStateWithSkipTmp/a570bf33c6368beb80e3083d8a523164/cf/22ac802f66f54a2ebdec7a2eb7d0bb7b 2024-12-02T15:25:20,314 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=241}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportFileSystemStateWithSkipTmp/4e2f4d4a8d7c58d9e796d0b446a7db4b/cf/39fce440bbfb411eb7821694ae84d5c9, entries=45, sequenceid=6, filesize=8.0 K 2024-12-02T15:25:20,314 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=240}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportFileSystemStateWithSkipTmp/a570bf33c6368beb80e3083d8a523164/cf/22ac802f66f54a2ebdec7a2eb7d0bb7b, entries=5, sequenceid=6, filesize=5.3 K 2024-12-02T15:25:20,315 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=240}] regionserver.HRegion(3140): Finished flush 
of dataSize ~333 B/333, heapSize ~960 B/960, currentSize=0 B/0 for a570bf33c6368beb80e3083d8a523164 in 437ms, sequenceid=6, compaction requested=false 2024-12-02T15:25:20,315 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=241}] regionserver.HRegion(3140): Finished flush of dataSize ~2.93 KB/3003, heapSize ~6.56 KB/6720, currentSize=0 B/0 for 4e2f4d4a8d7c58d9e796d0b446a7db4b in 437ms, sequenceid=6, compaction requested=false 2024-12-02T15:25:20,315 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=241}] regionserver.HRegion(2603): Flush status journal for 4e2f4d4a8d7c58d9e796d0b446a7db4b: 2024-12-02T15:25:20,315 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=240}] regionserver.HRegion(2603): Flush status journal for a570bf33c6368beb80e3083d8a523164: 2024-12-02T15:25:20,315 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=241}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithSkipTmp,1,1733153118173.4e2f4d4a8d7c58d9e796d0b446a7db4b. for snaptb0-testExportFileSystemStateWithSkipTmp completed. 2024-12-02T15:25:20,315 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=240}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithSkipTmp,,1733153118173.a570bf33c6368beb80e3083d8a523164. for snaptb0-testExportFileSystemStateWithSkipTmp completed. 2024-12-02T15:25:20,315 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=241}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithSkipTmp,1,1733153118173.4e2f4d4a8d7c58d9e796d0b446a7db4b.' region-info for snapshot=snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-02T15:25:20,315 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=241}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-02T15:25:20,315 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=240}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithSkipTmp,,1733153118173.a570bf33c6368beb80e3083d8a523164.' 
region-info for snapshot=snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-02T15:25:20,315 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=240}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-02T15:25:20,315 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=241}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportFileSystemStateWithSkipTmp/4e2f4d4a8d7c58d9e796d0b446a7db4b/cf/39fce440bbfb411eb7821694ae84d5c9] hfiles 2024-12-02T15:25:20,315 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=240}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportFileSystemStateWithSkipTmp/a570bf33c6368beb80e3083d8a523164/cf/22ac802f66f54a2ebdec7a2eb7d0bb7b] hfiles 2024-12-02T15:25:20,315 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=241}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportFileSystemStateWithSkipTmp/4e2f4d4a8d7c58d9e796d0b446a7db4b/cf/39fce440bbfb411eb7821694ae84d5c9 for snapshot=snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-02T15:25:20,315 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=240}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportFileSystemStateWithSkipTmp/a570bf33c6368beb80e3083d8a523164/cf/22ac802f66f54a2ebdec7a2eb7d0bb7b for snapshot=snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-02T15:25:20,320 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742373_1549 (size=121) 2024-12-02T15:25:20,320 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742373_1549 (size=121) 2024-12-02T15:25:20,321 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742372_1548 (size=121) 2024-12-02T15:25:20,321 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742372_1548 (size=121) 2024-12-02T15:25:20,321 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742372_1548 (size=121) 2024-12-02T15:25:20,321 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742373_1549 (size=121) 2024-12-02T15:25:20,321 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=240}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,,1733153118173.a570bf33c6368beb80e3083d8a523164. 
2024-12-02T15:25:20,321 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=241}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,1,1733153118173.4e2f4d4a8d7c58d9e796d0b446a7db4b. 2024-12-02T15:25:20,321 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=240}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=240 2024-12-02T15:25:20,321 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/be0458f36726:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=241}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=241 2024-12-02T15:25:20,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] master.HMaster(4169): Remote procedure done, pid=240 2024-12-02T15:25:20,322 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemStateWithSkipTmp on region a570bf33c6368beb80e3083d8a523164 2024-12-02T15:25:20,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] master.HMaster(4169): Remote procedure done, pid=241 2024-12-02T15:25:20,322 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemStateWithSkipTmp on region 4e2f4d4a8d7c58d9e796d0b446a7db4b 2024-12-02T15:25:20,322 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=240, ppid=239, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure a570bf33c6368beb80e3083d8a523164 2024-12-02T15:25:20,322 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=241, ppid=239, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 4e2f4d4a8d7c58d9e796d0b446a7db4b 2024-12-02T15:25:20,324 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=240, ppid=239, state=SUCCESS, hasLock=false; SnapshotRegionProcedure a570bf33c6368beb80e3083d8a523164 in 599 msec 2024-12-02T15:25:20,325 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=241, resume processing ppid=239 2024-12-02T15:25:20,325 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=239, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=239, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-02T15:25:20,325 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=241, ppid=239, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 4e2f4d4a8d7c58d9e796d0b446a7db4b in 599 msec 2024-12-02T15:25:20,325 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=239, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=239, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-02T15:25:20,326 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=239, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=239, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp 
table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-02T15:25:20,326 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-02T15:25:20,326 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-02T15:25:20,333 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742374_1550 (size=663) 2024-12-02T15:25:20,333 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742374_1550 (size=663) 2024-12-02T15:25:20,333 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742374_1550 (size=663) 2024-12-02T15:25:20,335 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=239, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=239, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-02T15:25:20,340 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=239, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=239, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-02T15:25:20,340 DEBUG [PEWorker-5 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithSkipTmp to hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-02T15:25:20,341 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=239, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=239, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-02T15:25:20,341 DEBUG [PEWorker-5 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 }, snapshot procedure id = 239 2024-12-02T15:25:20,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=239 2024-12-02T15:25:20,343 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=239, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=239, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } in 628 msec 2024-12-02T15:25:20,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=239 
2024-12-02T15:25:20,853 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemStateWithSkipTmp completed 2024-12-02T15:25:20,853 INFO [Time-limited test {}] snapshot.TestExportSnapshot(515): HDFS export destination path: hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/export-test/export-1733153120853 2024-12-02T15:25:20,853 INFO [Time-limited test {}] snapshot.TestExportSnapshot(542): tgtFsUri=hdfs://localhost:36329, tgtDir=hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/export-test/export-1733153120853, rawTgtDir=hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/export-test/export-1733153120853, srcFsUri=hdfs://localhost:36329, srcDir=hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657 2024-12-02T15:25:20,879 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1094): inputFs=hdfs://localhost:36329, inputRoot=hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657 2024-12-02T15:25:20,879 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1095): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-205930424_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/export-test/export-1733153120853, skipTmp=true, initialOutputSnapshotDir=hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/export-test/export-1733153120853/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-02T15:25:20,881 INFO [Time-limited test {}] snapshot.ExportSnapshot(1104): Verify the source snapshot's expiration status and integrity. 
2024-12-02T15:25:20,888 INFO [Time-limited test {}] snapshot.ExportSnapshot(1162): Copy Snapshot Manifest from hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp to hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/export-test/export-1733153120853/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-02T15:25:20,895 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742375_1551 (size=198) 2024-12-02T15:25:20,895 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742375_1551 (size=198) 2024-12-02T15:25:20,895 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742375_1551 (size=198) 2024-12-02T15:25:20,902 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742376_1552 (size=663) 2024-12-02T15:25:20,903 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742376_1552 (size=663) 2024-12-02T15:25:20,903 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742376_1552 (size=663) 2024-12-02T15:25:20,904 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-common/target/hbase-common-3.0.0-beta-2-SNAPSHOT.jar 2024-12-02T15:25:20,904 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-protocol-shaded/target/hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar 2024-12-02T15:25:20,905 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-client/target/hbase-client-3.0.0-beta-2-SNAPSHOT.jar 2024-12-02T15:25:21,784 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733152780087_0010_000001 (auth:SIMPLE) from 127.0.0.1:32974 2024-12-02T15:25:21,804 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_126270/yarn-6841854118/MiniMRCluster_126270-localDir-nm-0_2/usercache/jenkins/appcache/application_1733152780087_0010/container_1733152780087_0010_01_000001/launch_container.sh] 2024-12-02T15:25:21,804 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_126270/yarn-6841854118/MiniMRCluster_126270-localDir-nm-0_2/usercache/jenkins/appcache/application_1733152780087_0010/container_1733152780087_0010_01_000001/container_tokens] 2024-12-02T15:25:21,804 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_126270/yarn-6841854118/MiniMRCluster_126270-localDir-nm-0_2/usercache/jenkins/appcache/application_1733152780087_0010/container_1733152780087_0010_01_000001/sysfs] 2024-12-02T15:25:21,890 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/44e720fa-d04f-d823-6564-55fab94a4e60/hadoop-2446923039564514644.jar 2024-12-02T15:25:21,890 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-hadoop-compat/target/hbase-hadoop-compat-3.0.0-beta-2-SNAPSHOT.jar 2024-12-02T15:25:21,890 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-hadoop-compat/target/hbase-hadoop-compat-3.0.0-beta-2-SNAPSHOT.jar 2024-12-02T15:25:21,945 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/44e720fa-d04f-d823-6564-55fab94a4e60/hadoop-5030022507175703914.jar 2024-12-02T15:25:21,946 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-metrics/target/hbase-metrics-3.0.0-beta-2-SNAPSHOT.jar 2024-12-02T15:25:21,946 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-metrics-api/target/hbase-metrics-api-3.0.0-beta-2-SNAPSHOT.jar 2024-12-02T15:25:21,946 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-replication/target/hbase-replication-3.0.0-beta-2-SNAPSHOT.jar 2024-12-02T15:25:21,946 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-http/target/hbase-http-3.0.0-beta-2-SNAPSHOT.jar 2024-12-02T15:25:21,947 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-procedure/target/hbase-procedure-3.0.0-beta-2-SNAPSHOT.jar 2024-12-02T15:25:21,947 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-zookeeper/target/hbase-zookeeper-3.0.0-beta-2-SNAPSHOT.jar 2024-12-02T15:25:21,947 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 
2024-12-02T15:25:21,947 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-02T15:25:21,948 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-02T15:25:21,948 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-02T15:25:21,948 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-02T15:25:21,948 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-02T15:25:21,949 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-02T15:25:21,949 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-02T15:25:21,949 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-02T15:25:21,949 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-02T15:25:21,949 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-02T15:25:21,950 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-02T15:25:21,950 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-02T15:25:21,950 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-02T15:25:21,950 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-02T15:25:21,950 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-02T15:25:21,950 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-02T15:25:21,951 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-02T15:25:22,012 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742377_1553 (size=24020) 2024-12-02T15:25:22,012 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742377_1553 (size=24020) 2024-12-02T15:25:22,012 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742377_1553 (size=24020) 2024-12-02T15:25:22,024 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742378_1554 (size=77755) 2024-12-02T15:25:22,024 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742378_1554 (size=77755) 2024-12-02T15:25:22,025 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742378_1554 (size=77755) 2024-12-02T15:25:22,056 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742379_1555 (size=6424745) 2024-12-02T15:25:22,057 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742379_1555 (size=6424745) 2024-12-02T15:25:22,057 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742379_1555 (size=6424745) 2024-12-02T15:25:22,064 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742380_1556 (size=131360) 2024-12-02T15:25:22,065 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742380_1556 (size=131360) 2024-12-02T15:25:22,065 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742380_1556 (size=131360) 2024-12-02T15:25:22,070 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742381_1557 (size=111793) 2024-12-02T15:25:22,071 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742381_1557 (size=111793) 2024-12-02T15:25:22,071 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742381_1557 (size=111793) 2024-12-02T15:25:22,083 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742382_1558 (size=1832290) 2024-12-02T15:25:22,083 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742382_1558 (size=1832290) 2024-12-02T15:25:22,083 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742382_1558 (size=1832290) 2024-12-02T15:25:22,103 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742383_1559 (size=443171) 2024-12-02T15:25:22,103 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742383_1559 (size=443171) 2024-12-02T15:25:22,103 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742383_1559 (size=443171) 2024-12-02T15:25:22,143 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742384_1560 (size=8360005) 2024-12-02T15:25:22,144 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742384_1560 (size=8360005) 2024-12-02T15:25:22,144 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742384_1560 (size=8360005) 2024-12-02T15:25:22,153 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742385_1561 (size=503880) 2024-12-02T15:25:22,154 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742385_1561 (size=503880) 2024-12-02T15:25:22,154 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742385_1561 (size=503880) 2024-12-02T15:25:22,171 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742386_1562 (size=322274) 2024-12-02T15:25:22,171 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742386_1562 (size=322274) 2024-12-02T15:25:22,171 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742386_1562 (size=322274) 2024-12-02T15:25:22,179 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742387_1563 (size=20406) 2024-12-02T15:25:22,179 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742387_1563 (size=20406) 2024-12-02T15:25:22,180 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742387_1563 (size=20406) 2024-12-02T15:25:22,192 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742388_1564 (size=45609) 2024-12-02T15:25:22,192 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742388_1564 (size=45609) 2024-12-02T15:25:22,193 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742388_1564 (size=45609) 2024-12-02T15:25:22,210 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742389_1565 (size=136454) 2024-12-02T15:25:22,210 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742389_1565 (size=136454) 2024-12-02T15:25:22,211 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742389_1565 (size=136454) 2024-12-02T15:25:22,230 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742390_1566 (size=1597136) 2024-12-02T15:25:22,230 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742390_1566 (size=1597136) 2024-12-02T15:25:22,230 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742390_1566 (size=1597136) 2024-12-02T15:25:22,237 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742391_1567 (size=30873) 2024-12-02T15:25:22,237 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742391_1567 (size=30873) 2024-12-02T15:25:22,237 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742391_1567 (size=30873) 2024-12-02T15:25:22,244 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742392_1568 (size=29229) 2024-12-02T15:25:22,244 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742392_1568 (size=29229) 2024-12-02T15:25:22,244 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742392_1568 (size=29229) 2024-12-02T15:25:22,253 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742393_1569 (size=903849) 2024-12-02T15:25:22,253 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742393_1569 (size=903849) 2024-12-02T15:25:22,255 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742393_1569 (size=903849) 2024-12-02T15:25:22,273 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742394_1570 (size=5175431) 2024-12-02T15:25:22,273 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742394_1570 (size=5175431) 2024-12-02T15:25:22,274 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742394_1570 (size=5175431) 2024-12-02T15:25:22,280 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742395_1571 (size=232881) 2024-12-02T15:25:22,280 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742395_1571 (size=232881) 2024-12-02T15:25:22,281 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742395_1571 (size=232881) 2024-12-02T15:25:22,292 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742396_1572 (size=1323991) 2024-12-02T15:25:22,293 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742396_1572 (size=1323991) 2024-12-02T15:25:22,293 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742396_1572 (size=1323991) 2024-12-02T15:25:22,316 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742397_1573 (size=4695811) 2024-12-02T15:25:22,316 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742397_1573 (size=4695811) 2024-12-02T15:25:22,316 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742397_1573 (size=4695811) 2024-12-02T15:25:22,327 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742398_1574 (size=1877034) 2024-12-02T15:25:22,327 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742398_1574 (size=1877034) 2024-12-02T15:25:22,327 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742398_1574 (size=1877034) 2024-12-02T15:25:22,336 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742399_1575 (size=217555) 2024-12-02T15:25:22,336 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742399_1575 (size=217555) 2024-12-02T15:25:22,336 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742399_1575 (size=217555) 2024-12-02T15:25:22,351 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742400_1576 (size=4188619) 2024-12-02T15:25:22,351 
INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742400_1576 (size=4188619) 2024-12-02T15:25:22,351 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742400_1576 (size=4188619) 2024-12-02T15:25:22,356 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742401_1577 (size=127628) 2024-12-02T15:25:22,356 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742401_1577 (size=127628) 2024-12-02T15:25:22,357 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742401_1577 (size=127628) 2024-12-02T15:25:22,358 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 2024-12-02T15:25:22,361 INFO [Time-limited test {}] snapshot.ExportSnapshot(663): Loading Snapshot 'snaptb0-testExportFileSystemStateWithSkipTmp' hfile list 2024-12-02T15:25:22,363 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=0 size=8.0 K 2024-12-02T15:25:22,363 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=1 size=5.3 K 2024-12-02T15:25:22,376 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742402_1578 (size=469) 2024-12-02T15:25:22,376 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742402_1578 (size=469) 2024-12-02T15:25:22,376 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742402_1578 (size=469) 2024-12-02T15:25:22,386 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742403_1579 (size=21) 2024-12-02T15:25:22,387 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742403_1579 (size=21) 2024-12-02T15:25:22,387 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742403_1579 (size=21) 2024-12-02T15:25:22,401 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742404_1580 (size=304251) 2024-12-02T15:25:22,401 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742404_1580 (size=304251) 2024-12-02T15:25:22,401 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742404_1580 (size=304251) 2024-12-02T15:25:22,413 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-02T15:25:22,413 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-12-02T15:25:22,424 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 9d2126893e3de75703fde3d34993d9c3, had cached 0 bytes from a total of 8256 2024-12-02T15:25:22,424 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region a07dd0e3653e17be2e0e596d280713d4, had cached 0 bytes from a total of 5356 2024-12-02T15:25:22,686 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733152780087_0011_000001 (auth:SIMPLE) from 127.0.0.1:42802 2024-12-02T15:25:23,438 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-02T15:25:23,463 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithSkipTmp 2024-12-02T15:25:23,463 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithSkipTmp Metrics about Tables on a single HBase RegionServer 2024-12-02T15:25:23,463 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithChecksum 2024-12-02T15:25:27,645 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733152780087_0011_000001 (auth:SIMPLE) from 127.0.0.1:60602 2024-12-02T15:25:28,015 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742405_1581 (size=349973) 2024-12-02T15:25:28,015 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742405_1581 (size=349973) 2024-12-02T15:25:28,017 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742405_1581 (size=349973) 2024-12-02T15:25:28,966 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-02T15:25:30,226 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733152780087_0011_000001 (auth:SIMPLE) from 127.0.0.1:57868 2024-12-02T15:25:30,226 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733152780087_0011_000001 (auth:SIMPLE) from 127.0.0.1:42150 2024-12-02T15:25:31,781 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-12-02T15:25:35,340 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742406_1582 (size=5424) 2024-12-02T15:25:35,340 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742406_1582 (size=5424) 2024-12-02T15:25:35,340 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742406_1582 (size=5424) 2024-12-02T15:25:35,477 WARN [regionserver/be0458f36726:0.Chore.1 {}] hbase.ExecutorStatusChore(69): RS_COMPACTED_FILES_DISCHARGER's size info, queued: 3, running: 1 2024-12-02T15:25:36,513 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742408_1584 (size=8188) 2024-12-02T15:25:36,514 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742408_1584 (size=8188) 2024-12-02T15:25:36,514 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742408_1584 (size=8188) 2024-12-02T15:25:36,567 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742407_1583 (size=22217) 2024-12-02T15:25:36,567 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742407_1583 (size=22217) 2024-12-02T15:25:36,567 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742407_1583 (size=22217) 2024-12-02T15:25:36,576 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742409_1585 (size=477) 2024-12-02T15:25:36,576 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742409_1585 (size=477) 2024-12-02T15:25:36,576 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742409_1585 (size=477) 2024-12-02T15:25:36,604 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742410_1586 (size=22217) 2024-12-02T15:25:36,604 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742410_1586 (size=22217) 2024-12-02T15:25:36,605 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742410_1586 (size=22217) 2024-12-02T15:25:36,646 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742411_1587 (size=349973) 2024-12-02T15:25:36,646 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742411_1587 (size=349973) 2024-12-02T15:25:36,646 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742411_1587 (size=349973) 2024-12-02T15:25:36,649 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_126270/yarn-6841854118/MiniMRCluster_126270-localDir-nm-0_1/usercache/jenkins/appcache/application_1733152780087_0011/container_1733152780087_0011_01_000002/launch_container.sh] 2024-12-02T15:25:36,649 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_126270/yarn-6841854118/MiniMRCluster_126270-localDir-nm-0_1/usercache/jenkins/appcache/application_1733152780087_0011/container_1733152780087_0011_01_000002/container_tokens] 2024-12-02T15:25:36,649 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_126270/yarn-6841854118/MiniMRCluster_126270-localDir-nm-0_1/usercache/jenkins/appcache/application_1733152780087_0011/container_1733152780087_0011_01_000002/sysfs] 2024-12-02T15:25:36,661 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733152780087_0011_000001 (auth:SIMPLE) from 127.0.0.1:47436 2024-12-02T15:25:38,581 INFO [Time-limited test {}] snapshot.ExportSnapshot(1219): Finalize the Snapshot Export 2024-12-02T15:25:38,581 INFO [Time-limited test {}] snapshot.ExportSnapshot(1230): Verify the exported snapshot's expiration status and integrity. 2024-12-02T15:25:38,586 INFO [Time-limited test {}] snapshot.ExportSnapshot(1236): Export Completed: snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-02T15:25:38,586 INFO [Time-limited test {}] snapshot.TestExportSnapshot(409): Exported snapshot 2024-12-02T15:25:38,586 INFO [Time-limited test {}] snapshot.TestExportSnapshot(420): Verified filesystem state 2024-12-02T15:25:38,586 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-205930424_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp at hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-02T15:25:38,587 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp/.snapshotinfo 2024-12-02T15:25:38,587 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp/data.manifest 2024-12-02T15:25:38,587 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-205930424_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/export-test/export-1733153120853/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp at hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/export-test/export-1733153120853/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-02T15:25:38,587 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): 
hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/export-test/export-1733153120853/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp/.snapshotinfo 2024-12-02T15:25:38,587 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/export-test/export-1733153120853/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp/data.manifest 2024-12-02T15:25:38,592 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testExportFileSystemStateWithSkipTmp 2024-12-02T15:25:38,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] procedure2.ProcedureExecutor(1139): Stored pid=242, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-12-02T15:25:38,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=242 2024-12-02T15:25:38,595 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733153138595"}]},"ts":"1733153138595"} 2024-12-02T15:25:38,596 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithSkipTmp, state=DISABLING in hbase:meta 2024-12-02T15:25:38,596 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(284): Set testtb-testExportFileSystemStateWithSkipTmp to state=DISABLING 2024-12-02T15:25:38,597 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=243, ppid=242, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithSkipTmp}] 2024-12-02T15:25:38,598 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=244, ppid=243, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=a570bf33c6368beb80e3083d8a523164, UNASSIGN}, {pid=245, ppid=243, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=4e2f4d4a8d7c58d9e796d0b446a7db4b, UNASSIGN}] 2024-12-02T15:25:38,598 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=245, ppid=243, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=4e2f4d4a8d7c58d9e796d0b446a7db4b, UNASSIGN 2024-12-02T15:25:38,599 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=244, ppid=243, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=a570bf33c6368beb80e3083d8a523164, UNASSIGN 2024-12-02T15:25:38,599 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=244 updating hbase:meta row=a570bf33c6368beb80e3083d8a523164, regionState=CLOSING, regionLocation=be0458f36726,43025,1733152773677 2024-12-02T15:25:38,599 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=245 updating hbase:meta row=4e2f4d4a8d7c58d9e796d0b446a7db4b, regionState=CLOSING, regionLocation=be0458f36726,35155,1733152773848 2024-12-02T15:25:38,600 
WARN [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34441 {}] assignment.AssignmentManager(1543): Unable to acquire lock for regionNode state=CLOSING, location=be0458f36726,43025,1733152773677, table=testtb-testExportFileSystemStateWithSkipTmp, region=a570bf33c6368beb80e3083d8a523164. It is likely that another thread is currently holding the lock. To avoid deadlock, skip execution for now. 2024-12-02T15:25:38,600 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=245, ppid=243, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=4e2f4d4a8d7c58d9e796d0b446a7db4b, UNASSIGN because future has completed 2024-12-02T15:25:38,601 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-02T15:25:38,601 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=246, ppid=245, state=RUNNABLE, hasLock=false; CloseRegionProcedure 4e2f4d4a8d7c58d9e796d0b446a7db4b, server=be0458f36726,35155,1733152773848}] 2024-12-02T15:25:38,601 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=244, ppid=243, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=a570bf33c6368beb80e3083d8a523164, UNASSIGN because future has completed 2024-12-02T15:25:38,601 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-02T15:25:38,601 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=247, ppid=244, state=RUNNABLE, hasLock=false; CloseRegionProcedure a570bf33c6368beb80e3083d8a523164, server=be0458f36726,43025,1733152773677}] 2024-12-02T15:25:38,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=242 2024-12-02T15:25:38,754 INFO [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=246}] handler.UnassignRegionHandler(122): Close 4e2f4d4a8d7c58d9e796d0b446a7db4b 2024-12-02T15:25:38,755 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=246}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-02T15:25:38,755 INFO [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=247}] handler.UnassignRegionHandler(122): Close a570bf33c6368beb80e3083d8a523164 2024-12-02T15:25:38,755 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=246}] regionserver.HRegion(1722): Closing 4e2f4d4a8d7c58d9e796d0b446a7db4b, disabling compactions & flushes 2024-12-02T15:25:38,755 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=247}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-12-02T15:25:38,755 INFO [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=246}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithSkipTmp,1,1733153118173.4e2f4d4a8d7c58d9e796d0b446a7db4b. 
2024-12-02T15:25:38,756 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=246}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithSkipTmp,1,1733153118173.4e2f4d4a8d7c58d9e796d0b446a7db4b. 2024-12-02T15:25:38,756 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=247}] regionserver.HRegion(1722): Closing a570bf33c6368beb80e3083d8a523164, disabling compactions & flushes 2024-12-02T15:25:38,756 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=246}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithSkipTmp,1,1733153118173.4e2f4d4a8d7c58d9e796d0b446a7db4b. after waiting 0 ms 2024-12-02T15:25:38,756 INFO [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=247}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithSkipTmp,,1733153118173.a570bf33c6368beb80e3083d8a523164. 2024-12-02T15:25:38,756 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=246}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithSkipTmp,1,1733153118173.4e2f4d4a8d7c58d9e796d0b446a7db4b. 2024-12-02T15:25:38,756 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=247}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithSkipTmp,,1733153118173.a570bf33c6368beb80e3083d8a523164. 2024-12-02T15:25:38,756 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=247}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithSkipTmp,,1733153118173.a570bf33c6368beb80e3083d8a523164. after waiting 0 ms 2024-12-02T15:25:38,756 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=247}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithSkipTmp,,1733153118173.a570bf33c6368beb80e3083d8a523164. 2024-12-02T15:25:38,764 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=247}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportFileSystemStateWithSkipTmp/a570bf33c6368beb80e3083d8a523164/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-02T15:25:38,764 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=246}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportFileSystemStateWithSkipTmp/4e2f4d4a8d7c58d9e796d0b446a7db4b/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-02T15:25:38,765 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=247}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-02T15:25:38,765 INFO [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=247}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithSkipTmp,,1733153118173.a570bf33c6368beb80e3083d8a523164. 
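The entries above show the master's DisableTableProcedure (pid=242) fanning out into a CloseTableRegionsProcedure and per-region close handlers on the two region servers. The request that started it ("Client=jenkins//172.17.0.2 disable testtb-testExportFileSystemStateWithSkipTmp") corresponds to an ordinary Admin call; a minimal sketch, assuming a reachable cluster and using the table name from the log:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class DisableTableSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        TableName table = TableName.valueOf("testtb-testExportFileSystemStateWithSkipTmp");
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          if (!admin.isTableDisabled(table)) {
            // Blocks until the master's DisableTableProcedure and its region-close
            // subprocedures (the pid=242..247 chain logged above) have completed.
            admin.disableTable(table);
          }
        }
      }
    }
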
2024-12-02T15:25:38,765 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=247}] regionserver.HRegion(1676): Region close journal for a570bf33c6368beb80e3083d8a523164: Waiting for close lock at 1733153138755Running coprocessor pre-close hooks at 1733153138756 (+1 ms)Disabling compacts and flushes for region at 1733153138756Disabling writes for close at 1733153138756Writing region close event to WAL at 1733153138758 (+2 ms)Running coprocessor post-close hooks at 1733153138765 (+7 ms)Closed at 1733153138765 2024-12-02T15:25:38,767 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=246}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-02T15:25:38,767 INFO [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=246}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithSkipTmp,1,1733153118173.4e2f4d4a8d7c58d9e796d0b446a7db4b. 2024-12-02T15:25:38,767 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=246}] regionserver.HRegion(1676): Region close journal for 4e2f4d4a8d7c58d9e796d0b446a7db4b: Waiting for close lock at 1733153138755Running coprocessor pre-close hooks at 1733153138755Disabling compacts and flushes for region at 1733153138755Disabling writes for close at 1733153138756 (+1 ms)Writing region close event to WAL at 1733153138758 (+2 ms)Running coprocessor post-close hooks at 1733153138767 (+9 ms)Closed at 1733153138767 2024-12-02T15:25:38,768 INFO [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=247}] handler.UnassignRegionHandler(157): Closed a570bf33c6368beb80e3083d8a523164 2024-12-02T15:25:38,769 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=244 updating hbase:meta row=a570bf33c6368beb80e3083d8a523164, regionState=CLOSED 2024-12-02T15:25:38,769 INFO [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION, pid=246}] handler.UnassignRegionHandler(157): Closed 4e2f4d4a8d7c58d9e796d0b446a7db4b 2024-12-02T15:25:38,771 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=245 updating hbase:meta row=4e2f4d4a8d7c58d9e796d0b446a7db4b, regionState=CLOSED 2024-12-02T15:25:38,775 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=247, ppid=244, state=RUNNABLE, hasLock=false; CloseRegionProcedure a570bf33c6368beb80e3083d8a523164, server=be0458f36726,43025,1733152773677 because future has completed 2024-12-02T15:25:38,776 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=246, ppid=245, state=RUNNABLE, hasLock=false; CloseRegionProcedure 4e2f4d4a8d7c58d9e796d0b446a7db4b, server=be0458f36726,35155,1733152773848 because future has completed 2024-12-02T15:25:38,778 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=247, resume processing ppid=244 2024-12-02T15:25:38,779 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=246, resume processing ppid=245 2024-12-02T15:25:38,779 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=246, ppid=245, state=SUCCESS, hasLock=false; CloseRegionProcedure 4e2f4d4a8d7c58d9e796d0b446a7db4b, server=be0458f36726,35155,1733152773848 in 176 msec 2024-12-02T15:25:38,779 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=247, ppid=244, 
state=SUCCESS, hasLock=false; CloseRegionProcedure a570bf33c6368beb80e3083d8a523164, server=be0458f36726,43025,1733152773677 in 175 msec 2024-12-02T15:25:38,779 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=244, ppid=243, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=a570bf33c6368beb80e3083d8a523164, UNASSIGN in 180 msec 2024-12-02T15:25:38,780 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=245, resume processing ppid=243 2024-12-02T15:25:38,780 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=245, ppid=243, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=4e2f4d4a8d7c58d9e796d0b446a7db4b, UNASSIGN in 181 msec 2024-12-02T15:25:38,781 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=243, resume processing ppid=242 2024-12-02T15:25:38,781 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=243, ppid=242, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithSkipTmp in 183 msec 2024-12-02T15:25:38,782 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733153138782"}]},"ts":"1733153138782"} 2024-12-02T15:25:38,784 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithSkipTmp, state=DISABLED in hbase:meta 2024-12-02T15:25:38,784 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(296): Set testtb-testExportFileSystemStateWithSkipTmp to state=DISABLED 2024-12-02T15:25:38,785 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=242, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp in 192 msec 2024-12-02T15:25:38,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=242 2024-12-02T15:25:38,914 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testExportFileSystemStateWithSkipTmp completed 2024-12-02T15:25:38,916 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testExportFileSystemStateWithSkipTmp 2024-12-02T15:25:38,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] procedure2.ProcedureExecutor(1139): Stored pid=248, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-12-02T15:25:38,921 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=248, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-12-02T15:25:38,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportFileSystemStateWithSkipTmp 2024-12-02T15:25:38,923 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=248, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 
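The recurring "Checking to see if procedure is done pid=..." lines are the client polling the master until a table procedure finishes; with the asynchronous client (the RawAsyncHBaseAdmin mentioned later in the log) that completion surfaces as a CompletableFuture. A sketch of issuing the table deletion that pid=248 represents, again not the test's own code:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.AsyncAdmin;
    import org.apache.hadoop.hbase.client.AsyncConnection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class AsyncDeleteTableSketch {
      public static void main(String[] args) throws Exception {
        TableName table = TableName.valueOf("testtb-testExportFileSystemStateWithSkipTmp");
        try (AsyncConnection conn =
                 ConnectionFactory.createAsyncConnection(HBaseConfiguration.create()).get()) {
          AsyncAdmin admin = conn.getAdmin();
          // The future completes once the master reports the DeleteTableProcedure
          // (pid=248 in this log) as finished -- the polling visible above.
          admin.deleteTable(table).join();
        }
      }
    }
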
2024-12-02T15:25:38,925 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=43025 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportFileSystemStateWithSkipTmp 2024-12-02T15:25:38,926 DEBUG [HFileArchiver-26 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportFileSystemStateWithSkipTmp/a570bf33c6368beb80e3083d8a523164 2024-12-02T15:25:38,926 DEBUG [HFileArchiver-27 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportFileSystemStateWithSkipTmp/4e2f4d4a8d7c58d9e796d0b446a7db4b 2024-12-02T15:25:38,928 DEBUG [HFileArchiver-27 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportFileSystemStateWithSkipTmp/4e2f4d4a8d7c58d9e796d0b446a7db4b/cf, FileablePath, hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportFileSystemStateWithSkipTmp/4e2f4d4a8d7c58d9e796d0b446a7db4b/recovered.edits] 2024-12-02T15:25:38,928 DEBUG [HFileArchiver-26 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportFileSystemStateWithSkipTmp/a570bf33c6368beb80e3083d8a523164/cf, FileablePath, hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportFileSystemStateWithSkipTmp/a570bf33c6368beb80e3083d8a523164/recovered.edits] 2024-12-02T15:25:38,938 DEBUG [HFileArchiver-26 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportFileSystemStateWithSkipTmp/a570bf33c6368beb80e3083d8a523164/cf/22ac802f66f54a2ebdec7a2eb7d0bb7b to hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/archive/data/default/testtb-testExportFileSystemStateWithSkipTmp/a570bf33c6368beb80e3083d8a523164/cf/22ac802f66f54a2ebdec7a2eb7d0bb7b 2024-12-02T15:25:38,938 DEBUG [HFileArchiver-27 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportFileSystemStateWithSkipTmp/4e2f4d4a8d7c58d9e796d0b446a7db4b/cf/39fce440bbfb411eb7821694ae84d5c9 to hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/archive/data/default/testtb-testExportFileSystemStateWithSkipTmp/4e2f4d4a8d7c58d9e796d0b446a7db4b/cf/39fce440bbfb411eb7821694ae84d5c9 2024-12-02T15:25:38,941 DEBUG [HFileArchiver-26 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportFileSystemStateWithSkipTmp/a570bf33c6368beb80e3083d8a523164/recovered.edits/9.seqid to hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/archive/data/default/testtb-testExportFileSystemStateWithSkipTmp/a570bf33c6368beb80e3083d8a523164/recovered.edits/9.seqid 2024-12-02T15:25:38,941 DEBUG [HFileArchiver-27 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportFileSystemStateWithSkipTmp/4e2f4d4a8d7c58d9e796d0b446a7db4b/recovered.edits/9.seqid to hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/archive/data/default/testtb-testExportFileSystemStateWithSkipTmp/4e2f4d4a8d7c58d9e796d0b446a7db4b/recovered.edits/9.seqid 2024-12-02T15:25:38,942 DEBUG [HFileArchiver-26 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportFileSystemStateWithSkipTmp/a570bf33c6368beb80e3083d8a523164 2024-12-02T15:25:38,942 DEBUG [HFileArchiver-27 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testtb-testExportFileSystemStateWithSkipTmp/4e2f4d4a8d7c58d9e796d0b446a7db4b 2024-12-02T15:25:38,942 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportFileSystemStateWithSkipTmp regions 2024-12-02T15:25:38,944 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=248, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-12-02T15:25:38,947 WARN [PEWorker-3 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testExportFileSystemStateWithSkipTmp from hbase:meta 2024-12-02T15:25:38,949 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testExportFileSystemStateWithSkipTmp' descriptor. 2024-12-02T15:25:38,951 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=248, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-12-02T15:25:38,951 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testExportFileSystemStateWithSkipTmp' from region states. 2024-12-02T15:25:38,951 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp,,1733153118173.a570bf33c6368beb80e3083d8a523164.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733153138951"}]},"ts":"9223372036854775807"} 2024-12-02T15:25:38,951 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp,1,1733153118173.4e2f4d4a8d7c58d9e796d0b446a7db4b.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733153138951"}]},"ts":"9223372036854775807"} 2024-12-02T15:25:38,953 INFO [PEWorker-3 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-12-02T15:25:38,953 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => a570bf33c6368beb80e3083d8a523164, NAME => 'testtb-testExportFileSystemStateWithSkipTmp,,1733153118173.a570bf33c6368beb80e3083d8a523164.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 4e2f4d4a8d7c58d9e796d0b446a7db4b, NAME => 'testtb-testExportFileSystemStateWithSkipTmp,1,1733153118173.4e2f4d4a8d7c58d9e796d0b446a7db4b.', STARTKEY => '1', ENDKEY => ''}] 2024-12-02T15:25:38,953 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testExportFileSystemStateWithSkipTmp' as deleted. 
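As the HFileArchiver entries show, deleting the table does not destroy its store files outright: each region directory is moved under the cluster's archive area (archive/data/<namespace>/<table>/<region>/...), including the recovered.edits seqid markers. A small sketch using the Hadoop FileSystem API to list what was archived; the path below is the one from this log and would differ elsewhere:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class ListArchivedFilesSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Archive location of the dropped table, as seen in the HFileArchiver lines above.
        Path archived = new Path(
            "hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657"
            + "/archive/data/default/testtb-testExportFileSystemStateWithSkipTmp");
        FileSystem fs = archived.getFileSystem(conf);
        // Walk region -> family (cf / recovered.edits) -> file and print each archived file.
        for (FileStatus region : fs.listStatus(archived)) {
          for (FileStatus family : fs.listStatus(region.getPath())) {
            for (FileStatus file : fs.listStatus(family.getPath())) {
              System.out.println(file.getPath() + " (" + file.getLen() + " bytes)");
            }
          }
        }
      }
    }
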
2024-12-02T15:25:38,954 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733153138953"}]},"ts":"9223372036854775807"} 2024-12-02T15:25:38,956 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportFileSystemStateWithSkipTmp state from META 2024-12-02T15:25:38,956 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(133): Finished pid=248, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-12-02T15:25:38,958 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=248, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp in 40 msec 2024-12-02T15:25:39,001 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35155-0x10197ce54a90003, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-12-02T15:25:39,001 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34441-0x10197ce54a90000, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-12-02T15:25:39,001 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43025-0x10197ce54a90001, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-12-02T15:25:39,001 DEBUG [pool-69-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45405-0x10197ce54a90002, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-12-02T15:25:39,002 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF 2024-12-02T15:25:39,002 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF 2024-12-02T15:25:39,002 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF 2024-12-02T15:25:39,003 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF 2024-12-02T15:25:39,009 DEBUG [pool-69-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45405-0x10197ce54a90002, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-12-02T15:25:39,009 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35155-0x10197ce54a90003, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-12-02T15:25:39,009 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43025-0x10197ce54a90001, quorum=127.0.0.1:55609, 
baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-12-02T15:25:39,009 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34441-0x10197ce54a90000, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-12-02T15:25:39,009 DEBUG [pool-69-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45405-0x10197ce54a90002, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T15:25:39,009 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35155-0x10197ce54a90003, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T15:25:39,009 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43025-0x10197ce54a90001, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T15:25:39,009 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34441-0x10197ce54a90000, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-02T15:25:39,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=248 2024-12-02T15:25:39,011 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-02T15:25:39,011 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-02T15:25:39,011 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-02T15:25:39,011 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportFileSystemStateWithSkipTmp 2024-12-02T15:25:39,012 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportFileSystemStateWithSkipTmp completed 2024-12-02T15:25:39,012 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-02T15:25:39,017 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportFileSystemStateWithSkipTmp" type: DISABLED 2024-12-02T15:25:39,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] snapshot.SnapshotManager(381): Deleting snapshot: 
emptySnaptb0-testExportFileSystemStateWithSkipTmp 2024-12-02T15:25:39,020 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportFileSystemStateWithSkipTmp" type: DISABLED 2024-12-02T15:25:39,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-02T15:25:39,041 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestSecureExportSnapshot#testExportFileSystemStateWithSkipTmp Thread=823 (was 814) Potentially hanging thread: process reaper (pid 147545) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-27 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-205930424_22 at /127.0.0.1:33444 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-205930424_22 at /127.0.0.1:50946 [Waiting for operation #7] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-205930424_22 at /127.0.0.1:33442 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_603551452_22 at /127.0.0.1:54748 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-23 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #20 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (834363022) connection to localhost/127.0.0.1:33041 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: Thread-8799 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: RSProcedureDispatcher-pool-8 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-205930424_22 at /127.0.0.1:50944 [Waiting for operation #7] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:33041 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-20 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-22 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-26 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-21 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #19 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1103281992_1 at /127.0.0.1:54724 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) 
app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=816 (was 816), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=729 (was 767), ProcessCount=23 (was 23), AvailableMemoryMB=975 (was 874) - AvailableMemoryMB LEAK? - 2024-12-02T15:25:39,041 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=823 is superior to 500 2024-12-02T15:25:39,041 INFO [Time-limited test {}] hbase.HBaseTestingUtil(2377): Stopping mini mapreduce cluster... 2024-12-02T15:25:39,047 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@59449b25{node,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/node} 2024-12-02T15:25:39,050 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3fa648e6{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-02T15:25:39,050 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-02T15:25:39,050 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5f1729d0{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,STOPPED} 2024-12-02T15:25:39,050 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1d3607f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/44e720fa-d04f-d823-6564-55fab94a4e60/hadoop.log.dir/,STOPPED} 2024-12-02T15:25:39,061 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(360): Exit code from container container_1733152780087_0011_01_000001 is : 143 2024-12-02T15:25:39,072 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_126270/yarn-6841854118/MiniMRCluster_126270-localDir-nm-1_1/usercache/jenkins/appcache/application_1733152780087_0011/container_1733152780087_0011_01_000001/launch_container.sh] 2024-12-02T15:25:39,073 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_126270/yarn-6841854118/MiniMRCluster_126270-localDir-nm-1_1/usercache/jenkins/appcache/application_1733152780087_0011/container_1733152780087_0011_01_000001/container_tokens] 
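Shortly before the ResourceChecker thread report above, the master logged two snapshot deletions ("emptySnaptb0-testExportFileSystemStateWithSkipTmp" and "snaptb0-testExportFileSystemStateWithSkipTmp"). The client-side counterpart is Admin.deleteSnapshot; a minimal sketch using the snapshot names from the log:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class DeleteSnapshotsSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // Snapshot names exactly as they appear in the master log above.
          admin.deleteSnapshot("emptySnaptb0-testExportFileSystemStateWithSkipTmp");
          admin.deleteSnapshot("snaptb0-testExportFileSystemStateWithSkipTmp");
        }
      }
    }
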
2024-12-02T15:25:39,073 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_126270/yarn-6841854118/MiniMRCluster_126270-localDir-nm-1_1/usercache/jenkins/appcache/application_1733152780087_0011/container_1733152780087_0011_01_000001/sysfs] 2024-12-02T15:25:40,595 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_126270/yarn-6841854118/MiniMRCluster_126270-localDir-nm-1_2/usercache/jenkins/appcache/application_1733152780087_0011/container_1733152780087_0011_01_000003/launch_container.sh] 2024-12-02T15:25:40,596 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_126270/yarn-6841854118/MiniMRCluster_126270-localDir-nm-1_2/usercache/jenkins/appcache/application_1733152780087_0011/container_1733152780087_0011_01_000003/container_tokens] 2024-12-02T15:25:40,596 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test/data/MiniMRCluster_126270/yarn-6841854118/MiniMRCluster_126270-localDir-nm-1_2/usercache/jenkins/appcache/application_1733152780087_0011/container_1733152780087_0011_01_000003/sysfs] 2024-12-02T15:25:43,093 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithSkipTmp 2024-12-02T15:25:44,292 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-02T15:25:56,071 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@126061c1{node,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/node} 2024-12-02T15:25:56,071 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6598a136{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-02T15:25:56,072 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-02T15:25:56,072 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@681c7fea{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,STOPPED} 2024-12-02T15:25:56,072 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6f604dfa{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/44e720fa-d04f-d823-6564-55fab94a4e60/hadoop.log.dir/,STOPPED} 2024-12-02T15:26:01,782 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-12-02T15:26:07,424 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 9d2126893e3de75703fde3d34993d9c3, had cached 0 bytes from a total of 8256 2024-12-02T15:26:07,424 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region a07dd0e3653e17be2e0e596d280713d4, had cached 0 bytes from a total of 5356 2024-12-02T15:26:13,085 ERROR [Thread[Thread-402,5,FailOnTimeoutGroup] {}] delegation.AbstractDelegationTokenSecretManager$ExpiredTokenRemover(852): ExpiredTokenRemover received java.lang.InterruptedException: sleep interrupted 2024-12-02T15:26:13,086 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@c3e40d6{cluster,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/cluster} 2024-12-02T15:26:13,086 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@75e1533{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-02T15:26:13,086 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-02T15:26:13,087 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@40318239{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,STOPPED} 2024-12-02T15:26:13,087 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5edc3560{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/44e720fa-d04f-d823-6564-55fab94a4e60/hadoop.log.dir/,STOPPED} 2024-12-02T15:26:13,089 WARN [ApplicationMaster Launcher {}] amlauncher.ApplicationMasterLauncher$LauncherThread(122): org.apache.hadoop.yarn.server.resourcemanager.amlauncher.ApplicationMasterLauncher$LauncherThread interrupted. Returning. 
2024-12-02T15:26:13,093 ERROR [SchedulerEventDispatcher:Event Processor {}] event.EventDispatcher$EventProcessor(72): Returning, interrupted : java.lang.InterruptedException 2024-12-02T15:26:13,093 ERROR [ResourceManager Event Processor Monitor {}] resourcemanager.ResourceManager$SchedulerEventDispatcher$EventProcessorMonitor(1193): Returning, interrupted : java.lang.InterruptedException: sleep interrupted 2024-12-02T15:26:13,095 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073741830_1006 (size=1152645) 2024-12-02T15:26:13,095 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073741830_1006 (size=1152645) 2024-12-02T15:26:13,096 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073741830_1006 (size=1152645) 2024-12-02T15:26:13,098 ERROR [Thread[Thread-423,5,FailOnTimeoutGroup] {}] delegation.AbstractDelegationTokenSecretManager$ExpiredTokenRemover(852): ExpiredTokenRemover received java.lang.InterruptedException: sleep interrupted 2024-12-02T15:26:13,101 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2edf2e84{jobhistory,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/jobhistory} 2024-12-02T15:26:13,101 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1cc97fcb{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-02T15:26:13,102 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-02T15:26:13,102 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@599e50d1{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,STOPPED} 2024-12-02T15:26:13,102 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@37232c97{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/44e720fa-d04f-d823-6564-55fab94a4e60/hadoop.log.dir/,STOPPED} 2024-12-02T15:26:13,103 ERROR [Thread[Thread-384,5,FailOnTimeoutGroup] {}] delegation.AbstractDelegationTokenSecretManager$ExpiredTokenRemover(852): ExpiredTokenRemover received java.lang.InterruptedException: sleep interrupted 2024-12-02T15:26:13,103 INFO [Time-limited test {}] hbase.HBaseTestingUtil(2380): Mini mapreduce cluster stopped 2024-12-02T15:26:13,103 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-12-02T15:26:13,103 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-12-02T15:26:13,103 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:123) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-02T15:26:13,103 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T15:26:13,104 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T15:26:13,104 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-02T15:26:13,104 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1295999132, stopped=false 2024-12-02T15:26:13,104 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
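The recorded call stack shows where the shutdown originates: TestExportSnapshot.tearDownAfterClass invoking HBaseTestingUtil.shutdownMiniCluster through JUnit's RunAfters. A stripped-down sketch of that lifecycle pattern, illustrative only and not the actual test class (startMiniCluster options vary by version):

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.junit.AfterClass;
    import org.junit.BeforeClass;

    public class MiniClusterLifecycleSketch {
      // Shared test harness, as used by the hbase-mapreduce snapshot tests.
      private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();

      @BeforeClass
      public static void setUpBeforeClass() throws Exception {
        // The real test also starts a mini MapReduce cluster and several region servers.
        TEST_UTIL.startMiniCluster();
      }

      @AfterClass
      public static void tearDownAfterClass() throws Exception {
        // Closes the shared connection and stops master, region servers, ZooKeeper and DFS,
        // producing the "Shutting down minicluster" sequence recorded above.
        TEST_UTIL.shutdownMiniCluster();
      }
    }
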
2024-12-02T15:26:13,104 DEBUG [Time-limited test {}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-02T15:26:13,104 DEBUG [Time-limited test {}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.SecureTestUtil$MasterSyncObserver 2024-12-02T15:26:13,104 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=be0458f36726,34441,1733152772730 2024-12-02T15:26:13,149 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34441-0x10197ce54a90000, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-02T15:26:13,149 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35155-0x10197ce54a90003, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-02T15:26:13,149 DEBUG [pool-69-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45405-0x10197ce54a90002, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-02T15:26:13,149 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43025-0x10197ce54a90001, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-02T15:26:13,149 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-02T15:26:13,149 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34441-0x10197ce54a90000, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T15:26:13,149 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35155-0x10197ce54a90003, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T15:26:13,149 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43025-0x10197ce54a90001, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T15:26:13,149 DEBUG [pool-69-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45405-0x10197ce54a90002, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T15:26:13,151 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-12-02T15:26:13,151 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:123) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-02T15:26:13,151 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T15:26:13,151 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:43025-0x10197ce54a90001, quorum=127.0.0.1:55609, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-02T15:26:13,151 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:34441-0x10197ce54a90000, quorum=127.0.0.1:55609, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-02T15:26:13,152 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:45405-0x10197ce54a90002, quorum=127.0.0.1:55609, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-02T15:26:13,152 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:35155-0x10197ce54a90003, quorum=127.0.0.1:55609, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-02T15:26:13,152 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 
'be0458f36726,43025,1733152773677' ***** 2024-12-02T15:26:13,153 DEBUG [Time-limited test {}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-02T15:26:13,153 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-02T15:26:13,153 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'be0458f36726,45405,1733152773787' ***** 2024-12-02T15:26:13,153 DEBUG [Time-limited test {}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-02T15:26:13,153 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-02T15:26:13,153 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'be0458f36726,35155,1733152773848' ***** 2024-12-02T15:26:13,153 DEBUG [Time-limited test {}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-02T15:26:13,153 INFO [RS:0;be0458f36726:43025 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-02T15:26:13,153 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-02T15:26:13,153 INFO [RS:1;be0458f36726:45405 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-02T15:26:13,153 INFO [RS:0;be0458f36726:43025 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-02T15:26:13,153 INFO [RS:2;be0458f36726:35155 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-02T15:26:13,153 INFO [RS:1;be0458f36726:45405 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-02T15:26:13,153 INFO [RS:2;be0458f36726:35155 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-02T15:26:13,154 INFO [RS:1;be0458f36726:45405 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-02T15:26:13,154 INFO [RS:0;be0458f36726:43025 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-02T15:26:13,154 INFO [RS:2;be0458f36726:35155 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 
2024-12-02T15:26:13,154 INFO [RS:2;be0458f36726:35155 {}] regionserver.HRegionServer(3091): Received CLOSE for a07dd0e3653e17be2e0e596d280713d4 2024-12-02T15:26:13,154 INFO [RS:0;be0458f36726:43025 {}] regionserver.HRegionServer(3091): Received CLOSE for 82c95700d9ff2efea2507bd84223c4b0 2024-12-02T15:26:13,154 INFO [RS:1;be0458f36726:45405 {}] regionserver.HRegionServer(3091): Received CLOSE for 9d2126893e3de75703fde3d34993d9c3 2024-12-02T15:26:13,154 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-02T15:26:13,154 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-02T15:26:13,154 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-02T15:26:13,154 INFO [RS:2;be0458f36726:35155 {}] regionserver.HRegionServer(959): stopping server be0458f36726,35155,1733152773848 2024-12-02T15:26:13,154 INFO [RS:1;be0458f36726:45405 {}] regionserver.HRegionServer(959): stopping server be0458f36726,45405,1733152773787 2024-12-02T15:26:13,154 INFO [RS:1;be0458f36726:45405 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-02T15:26:13,154 INFO [RS:2;be0458f36726:35155 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-02T15:26:13,154 INFO [RS:1;be0458f36726:45405 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;be0458f36726:45405. 2024-12-02T15:26:13,154 INFO [RS:2;be0458f36726:35155 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:2;be0458f36726:35155. 2024-12-02T15:26:13,154 DEBUG [RS:1;be0458f36726:45405 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-02T15:26:13,154 DEBUG [RS:1;be0458f36726:45405 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T15:26:13,155 INFO [RS:1;be0458f36726:45405 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-02T15:26:13,155 INFO [RS:1;be0458f36726:45405 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-02T15:26:13,155 INFO [RS:1;be0458f36726:45405 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-12-02T15:26:13,155 INFO [RS:1;be0458f36726:45405 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-02T15:26:13,155 INFO [RS:0;be0458f36726:43025 {}] regionserver.HRegionServer(959): stopping server be0458f36726,43025,1733152773677 2024-12-02T15:26:13,155 INFO [RS:0;be0458f36726:43025 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-02T15:26:13,155 INFO [RS:0;be0458f36726:43025 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;be0458f36726:43025. 2024-12-02T15:26:13,154 DEBUG [RS:2;be0458f36726:35155 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-02T15:26:13,155 DEBUG [RS:0;be0458f36726:43025 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-02T15:26:13,155 DEBUG [RS:2;be0458f36726:35155 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T15:26:13,155 DEBUG [RS:0;be0458f36726:43025 {}] ipc.AbstractRpcClient(514): Stopping 
rpc client 2024-12-02T15:26:13,155 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 9d2126893e3de75703fde3d34993d9c3, disabling compactions & flushes 2024-12-02T15:26:13,155 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing a07dd0e3653e17be2e0e596d280713d4, disabling compactions & flushes 2024-12-02T15:26:13,155 INFO [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region testExportExpiredSnapshot,1,1733153032065.9d2126893e3de75703fde3d34993d9c3. 2024-12-02T15:26:13,155 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 82c95700d9ff2efea2507bd84223c4b0, disabling compactions & flushes 2024-12-02T15:26:13,155 INFO [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region testExportExpiredSnapshot,,1733153032065.a07dd0e3653e17be2e0e596d280713d4. 2024-12-02T15:26:13,155 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on testExportExpiredSnapshot,1,1733153032065.9d2126893e3de75703fde3d34993d9c3. 2024-12-02T15:26:13,155 INFO [RS:0;be0458f36726:43025 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-12-02T15:26:13,155 INFO [RS:2;be0458f36726:35155 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-12-02T15:26:13,155 INFO [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region hbase:acl,,1733152776657.82c95700d9ff2efea2507bd84223c4b0. 2024-12-02T15:26:13,155 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on testExportExpiredSnapshot,,1733153032065.a07dd0e3653e17be2e0e596d280713d4. 2024-12-02T15:26:13,155 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on testExportExpiredSnapshot,1,1733153032065.9d2126893e3de75703fde3d34993d9c3. after waiting 0 ms 2024-12-02T15:26:13,155 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:acl,,1733152776657.82c95700d9ff2efea2507bd84223c4b0. 2024-12-02T15:26:13,155 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region testExportExpiredSnapshot,1,1733153032065.9d2126893e3de75703fde3d34993d9c3. 2024-12-02T15:26:13,155 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on testExportExpiredSnapshot,,1733153032065.a07dd0e3653e17be2e0e596d280713d4. after waiting 0 ms 2024-12-02T15:26:13,155 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on hbase:acl,,1733152776657.82c95700d9ff2efea2507bd84223c4b0. 
after waiting 0 ms 2024-12-02T15:26:13,155 DEBUG [RS:0;be0458f36726:43025 {}] regionserver.HRegionServer(1325): Online Regions={82c95700d9ff2efea2507bd84223c4b0=hbase:acl,,1733152776657.82c95700d9ff2efea2507bd84223c4b0.} 2024-12-02T15:26:13,155 DEBUG [RS:2;be0458f36726:35155 {}] regionserver.HRegionServer(1325): Online Regions={a07dd0e3653e17be2e0e596d280713d4=testExportExpiredSnapshot,,1733153032065.a07dd0e3653e17be2e0e596d280713d4.} 2024-12-02T15:26:13,155 INFO [RS:1;be0458f36726:45405 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-12-02T15:26:13,155 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region hbase:acl,,1733152776657.82c95700d9ff2efea2507bd84223c4b0. 2024-12-02T15:26:13,155 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region testExportExpiredSnapshot,,1733153032065.a07dd0e3653e17be2e0e596d280713d4. 2024-12-02T15:26:13,155 DEBUG [RS:1;be0458f36726:45405 {}] regionserver.HRegionServer(1325): Online Regions={9d2126893e3de75703fde3d34993d9c3=testExportExpiredSnapshot,1,1733153032065.9d2126893e3de75703fde3d34993d9c3., 1588230740=hbase:meta,,1.1588230740} 2024-12-02T15:26:13,156 INFO [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing 82c95700d9ff2efea2507bd84223c4b0 1/1 column families, dataSize=1.65 KB heapSize=3.90 KB 2024-12-02T15:26:13,156 DEBUG [RS_CLOSE_META-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-02T15:26:13,156 DEBUG [RS:1;be0458f36726:45405 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 9d2126893e3de75703fde3d34993d9c3 2024-12-02T15:26:13,156 DEBUG [RS:0;be0458f36726:43025 {}] regionserver.HRegionServer(1351): Waiting on 82c95700d9ff2efea2507bd84223c4b0 2024-12-02T15:26:13,156 DEBUG [RS:2;be0458f36726:35155 {}] regionserver.HRegionServer(1351): Waiting on a07dd0e3653e17be2e0e596d280713d4 2024-12-02T15:26:13,156 INFO [RS_CLOSE_META-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-02T15:26:13,156 DEBUG [RS_CLOSE_META-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-02T15:26:13,156 DEBUG [RS_CLOSE_META-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-02T15:26:13,156 DEBUG [RS_CLOSE_META-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-02T15:26:13,156 INFO [RS_CLOSE_META-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=85.70 KB heapSize=135.56 KB 2024-12-02T15:26:13,167 INFO [regionserver/be0458f36726:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-02T15:26:13,168 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testExportExpiredSnapshot/a07dd0e3653e17be2e0e596d280713d4/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 
2024-12-02T15:26:13,168 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/default/testExportExpiredSnapshot/9d2126893e3de75703fde3d34993d9c3/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-12-02T15:26:13,168 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-02T15:26:13,168 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-02T15:26:13,168 INFO [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed testExportExpiredSnapshot,,1733153032065.a07dd0e3653e17be2e0e596d280713d4. 2024-12-02T15:26:13,168 INFO [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed testExportExpiredSnapshot,1,1733153032065.9d2126893e3de75703fde3d34993d9c3. 2024-12-02T15:26:13,168 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 9d2126893e3de75703fde3d34993d9c3: Waiting for close lock at 1733153173154Running coprocessor pre-close hooks at 1733153173155 (+1 ms)Disabling compacts and flushes for region at 1733153173155Disabling writes for close at 1733153173155Writing region close event to WAL at 1733153173157 (+2 ms)Running coprocessor post-close hooks at 1733153173168 (+11 ms)Closed at 1733153173168 2024-12-02T15:26:13,168 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for a07dd0e3653e17be2e0e596d280713d4: Waiting for close lock at 1733153173154Running coprocessor pre-close hooks at 1733153173155 (+1 ms)Disabling compacts and flushes for region at 1733153173155Disabling writes for close at 1733153173155Writing region close event to WAL at 1733153173157 (+2 ms)Running coprocessor post-close hooks at 1733153173168 (+11 ms)Closed at 1733153173168 2024-12-02T15:26:13,168 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed testExportExpiredSnapshot,1,1733153032065.9d2126893e3de75703fde3d34993d9c3. 2024-12-02T15:26:13,168 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed testExportExpiredSnapshot,,1733153032065.a07dd0e3653e17be2e0e596d280713d4. 
2024-12-02T15:26:13,180 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/hbase/acl/82c95700d9ff2efea2507bd84223c4b0/.tmp/l/a414e18386c74c81a7ebcd2c5d6abb4c is 74, key is testtb-testExportFileSystemStateWithMergeRegion-1/l:/1733153030154/DeleteFamily/seqid=0 2024-12-02T15:26:13,185 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742412_1588 (size=5860) 2024-12-02T15:26:13,185 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742412_1588 (size=5860) 2024-12-02T15:26:13,185 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742412_1588 (size=5860) 2024-12-02T15:26:13,185 INFO [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.65 KB at sequenceid=31 (bloomFilter=false), to=hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/hbase/acl/82c95700d9ff2efea2507bd84223c4b0/.tmp/l/a414e18386c74c81a7ebcd2c5d6abb4c 2024-12-02T15:26:13,187 DEBUG [RS_CLOSE_META-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/hbase/meta/1588230740/.tmp/info/cf80c8f32ab84a3b81e8784924ca8415 is 173, key is testExportExpiredSnapshot,1,1733153032065.9d2126893e3de75703fde3d34993d9c3./info:regioninfo/1733153032436/Put/seqid=0 2024-12-02T15:26:13,190 INFO [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for a414e18386c74c81a7ebcd2c5d6abb4c 2024-12-02T15:26:13,190 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/hbase/acl/82c95700d9ff2efea2507bd84223c4b0/.tmp/l/a414e18386c74c81a7ebcd2c5d6abb4c as hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/hbase/acl/82c95700d9ff2efea2507bd84223c4b0/l/a414e18386c74c81a7ebcd2c5d6abb4c 2024-12-02T15:26:13,192 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742413_1589 (size=15646) 2024-12-02T15:26:13,192 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742413_1589 (size=15646) 2024-12-02T15:26:13,192 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742413_1589 (size=15646) 2024-12-02T15:26:13,193 INFO [RS_CLOSE_META-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=72.69 KB at sequenceid=236 (bloomFilter=true), to=hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/hbase/meta/1588230740/.tmp/info/cf80c8f32ab84a3b81e8784924ca8415 2024-12-02T15:26:13,195 INFO [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION}] 
regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for a414e18386c74c81a7ebcd2c5d6abb4c 2024-12-02T15:26:13,195 INFO [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/hbase/acl/82c95700d9ff2efea2507bd84223c4b0/l/a414e18386c74c81a7ebcd2c5d6abb4c, entries=14, sequenceid=31, filesize=5.7 K 2024-12-02T15:26:13,196 INFO [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~1.65 KB/1694, heapSize ~3.88 KB/3976, currentSize=0 B/0 for 82c95700d9ff2efea2507bd84223c4b0 in 41ms, sequenceid=31, compaction requested=false 2024-12-02T15:26:13,199 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/hbase/acl/82c95700d9ff2efea2507bd84223c4b0/recovered.edits/34.seqid, newMaxSeqId=34, maxSeqId=1 2024-12-02T15:26:13,200 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-02T15:26:13,200 INFO [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed hbase:acl,,1733152776657.82c95700d9ff2efea2507bd84223c4b0. 2024-12-02T15:26:13,200 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 82c95700d9ff2efea2507bd84223c4b0: Waiting for close lock at 1733153173155Running coprocessor pre-close hooks at 1733153173155Disabling compacts and flushes for region at 1733153173155Disabling writes for close at 1733153173155Obtaining lock to block concurrent updates at 1733153173156 (+1 ms)Preparing flush snapshotting stores in 82c95700d9ff2efea2507bd84223c4b0 at 1733153173156Finished memstore snapshotting hbase:acl,,1733152776657.82c95700d9ff2efea2507bd84223c4b0., syncing WAL and waiting on mvcc, flushsize=dataSize=1694, getHeapSize=3976, getOffHeapSize=0, getCellsCount=27 at 1733153173156Flushing stores of hbase:acl,,1733152776657.82c95700d9ff2efea2507bd84223c4b0. at 1733153173156Flushing 82c95700d9ff2efea2507bd84223c4b0/l: creating writer at 1733153173156Flushing 82c95700d9ff2efea2507bd84223c4b0/l: appending metadata at 1733153173180 (+24 ms)Flushing 82c95700d9ff2efea2507bd84223c4b0/l: closing flushed file at 1733153173180Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@f483318: reopening flushed file at 1733153173190 (+10 ms)Finished flush of dataSize ~1.65 KB/1694, heapSize ~3.88 KB/3976, currentSize=0 B/0 for 82c95700d9ff2efea2507bd84223c4b0 in 41ms, sequenceid=31, compaction requested=false at 1733153173196 (+6 ms)Writing region close event to WAL at 1733153173197 (+1 ms)Running coprocessor post-close hooks at 1733153173200 (+3 ms)Closed at 1733153173200 2024-12-02T15:26:13,200 DEBUG [RS_CLOSE_REGION-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed hbase:acl,,1733152776657.82c95700d9ff2efea2507bd84223c4b0. 
2024-12-02T15:26:13,210 DEBUG [RS_CLOSE_META-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/hbase/meta/1588230740/.tmp/ns/c9053ce9ece147579835ca694dc8d3a4 is 124, key is testtb-testExportFileSystemStateWithMergeRegion-1,,1733153013788.87c318bcdd942ee5ef8fe681967ac54b./ns:/1733153030176/DeleteFamily/seqid=0 2024-12-02T15:26:13,215 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742414_1590 (size=8378) 2024-12-02T15:26:13,215 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742414_1590 (size=8378) 2024-12-02T15:26:13,215 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742414_1590 (size=8378) 2024-12-02T15:26:13,217 INFO [RS_CLOSE_META-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.79 KB at sequenceid=236 (bloomFilter=true), to=hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/hbase/meta/1588230740/.tmp/ns/c9053ce9ece147579835ca694dc8d3a4 2024-12-02T15:26:13,236 INFO [regionserver/be0458f36726:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-02T15:26:13,238 DEBUG [RS_CLOSE_META-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/hbase/meta/1588230740/.tmp/rep_barrier/a43e6d1ab5d94af2b8d51aae7c9432ea is 133, key is testtb-testExportFileSystemStateWithMergeRegion-1,,1733153013788.87c318bcdd942ee5ef8fe681967ac54b./rep_barrier:/1733153030176/DeleteFamily/seqid=0 2024-12-02T15:26:13,240 INFO [regionserver/be0458f36726:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-02T15:26:13,243 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742415_1591 (size=8717) 2024-12-02T15:26:13,243 INFO [RS_CLOSE_META-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.95 KB at sequenceid=236 (bloomFilter=true), to=hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/hbase/meta/1588230740/.tmp/rep_barrier/a43e6d1ab5d94af2b8d51aae7c9432ea 2024-12-02T15:26:13,244 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742415_1591 (size=8717) 2024-12-02T15:26:13,244 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742415_1591 (size=8717) 2024-12-02T15:26:13,262 DEBUG [RS_CLOSE_META-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/hbase/meta/1588230740/.tmp/table/761e063fc9d84a87af93ca32efb5d129 is 127, key is testtb-testExportFileSystemStateWithMergeRegion-1,,1733153013788.87c318bcdd942ee5ef8fe681967ac54b./table:/1733153030176/DeleteFamily/seqid=0 2024-12-02T15:26:13,267 INFO [Block report processor {}] blockmanagement.BlockManager(3777): 
BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742416_1592 (size=9531) 2024-12-02T15:26:13,267 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742416_1592 (size=9531) 2024-12-02T15:26:13,267 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742416_1592 (size=9531) 2024-12-02T15:26:13,268 INFO [RS_CLOSE_META-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.27 KB at sequenceid=236 (bloomFilter=true), to=hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/hbase/meta/1588230740/.tmp/table/761e063fc9d84a87af93ca32efb5d129 2024-12-02T15:26:13,272 DEBUG [RS_CLOSE_META-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/hbase/meta/1588230740/.tmp/info/cf80c8f32ab84a3b81e8784924ca8415 as hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/hbase/meta/1588230740/info/cf80c8f32ab84a3b81e8784924ca8415 2024-12-02T15:26:13,276 INFO [RS_CLOSE_META-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/hbase/meta/1588230740/info/cf80c8f32ab84a3b81e8784924ca8415, entries=84, sequenceid=236, filesize=15.3 K 2024-12-02T15:26:13,277 DEBUG [RS_CLOSE_META-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/hbase/meta/1588230740/.tmp/ns/c9053ce9ece147579835ca694dc8d3a4 as hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/hbase/meta/1588230740/ns/c9053ce9ece147579835ca694dc8d3a4 2024-12-02T15:26:13,282 INFO [RS_CLOSE_META-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/hbase/meta/1588230740/ns/c9053ce9ece147579835ca694dc8d3a4, entries=28, sequenceid=236, filesize=8.2 K 2024-12-02T15:26:13,283 DEBUG [RS_CLOSE_META-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/hbase/meta/1588230740/.tmp/rep_barrier/a43e6d1ab5d94af2b8d51aae7c9432ea as hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/hbase/meta/1588230740/rep_barrier/a43e6d1ab5d94af2b8d51aae7c9432ea 2024-12-02T15:26:13,288 INFO [RS_CLOSE_META-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/hbase/meta/1588230740/rep_barrier/a43e6d1ab5d94af2b8d51aae7c9432ea, entries=26, sequenceid=236, filesize=8.5 K 2024-12-02T15:26:13,288 DEBUG [RS_CLOSE_META-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/hbase/meta/1588230740/.tmp/table/761e063fc9d84a87af93ca32efb5d129 as 
hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/hbase/meta/1588230740/table/761e063fc9d84a87af93ca32efb5d129 2024-12-02T15:26:13,293 INFO [RS_CLOSE_META-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/hbase/meta/1588230740/table/761e063fc9d84a87af93ca32efb5d129, entries=43, sequenceid=236, filesize=9.3 K 2024-12-02T15:26:13,294 INFO [RS_CLOSE_META-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~85.70 KB/87755, heapSize ~135.50 KB/138752, currentSize=0 B/0 for 1588230740 in 138ms, sequenceid=236, compaction requested=false 2024-12-02T15:26:13,297 DEBUG [RS_CLOSE_META-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/data/hbase/meta/1588230740/recovered.edits/239.seqid, newMaxSeqId=239, maxSeqId=1 2024-12-02T15:26:13,298 DEBUG [RS_CLOSE_META-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-02T15:26:13,298 DEBUG [RS_CLOSE_META-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-02T15:26:13,298 INFO [RS_CLOSE_META-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-02T15:26:13,298 DEBUG [RS_CLOSE_META-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733153173156Running coprocessor pre-close hooks at 1733153173156Disabling compacts and flushes for region at 1733153173156Disabling writes for close at 1733153173156Obtaining lock to block concurrent updates at 1733153173156Preparing flush snapshotting stores in 1588230740 at 1733153173156Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=87755, getHeapSize=138752, getOffHeapSize=0, getCellsCount=663 at 1733153173156Flushing stores of hbase:meta,,1.1588230740 at 1733153173157 (+1 ms)Flushing 1588230740/info: creating writer at 1733153173157Flushing 1588230740/info: appending metadata at 1733153173187 (+30 ms)Flushing 1588230740/info: closing flushed file at 1733153173187Flushing 1588230740/ns: creating writer at 1733153173197 (+10 ms)Flushing 1588230740/ns: appending metadata at 1733153173210 (+13 ms)Flushing 1588230740/ns: closing flushed file at 1733153173210Flushing 1588230740/rep_barrier: creating writer at 1733153173221 (+11 ms)Flushing 1588230740/rep_barrier: appending metadata at 1733153173237 (+16 ms)Flushing 1588230740/rep_barrier: closing flushed file at 1733153173237Flushing 1588230740/table: creating writer at 1733153173249 (+12 ms)Flushing 1588230740/table: appending metadata at 1733153173262 (+13 ms)Flushing 1588230740/table: closing flushed file at 1733153173262Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@24231767: reopening flushed file at 1733153173271 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6a215c3e: reopening flushed file at 1733153173276 (+5 ms)Flushing 
org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2b3c4374: reopening flushed file at 1733153173283 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@44d28bd8: reopening flushed file at 1733153173288 (+5 ms)Finished flush of dataSize ~85.70 KB/87755, heapSize ~135.50 KB/138752, currentSize=0 B/0 for 1588230740 in 138ms, sequenceid=236, compaction requested=false at 1733153173294 (+6 ms)Writing region close event to WAL at 1733153173295 (+1 ms)Running coprocessor post-close hooks at 1733153173298 (+3 ms)Closed at 1733153173298 2024-12-02T15:26:13,298 DEBUG [RS_CLOSE_META-regionserver/be0458f36726:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-02T15:26:13,356 INFO [RS:1;be0458f36726:45405 {}] regionserver.HRegionServer(976): stopping server be0458f36726,45405,1733152773787; all regions closed. 2024-12-02T15:26:13,356 INFO [RS:2;be0458f36726:35155 {}] regionserver.HRegionServer(976): stopping server be0458f36726,35155,1733152773848; all regions closed. 2024-12-02T15:26:13,356 INFO [RS:0;be0458f36726:43025 {}] regionserver.HRegionServer(976): stopping server be0458f36726,43025,1733152773677; all regions closed. 2024-12-02T15:26:13,360 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073741834_1010 (size=19991) 2024-12-02T15:26:13,360 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073741833_1009 (size=13435) 2024-12-02T15:26:13,360 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073741834_1010 (size=19991) 2024-12-02T15:26:13,361 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073741833_1009 (size=13435) 2024-12-02T15:26:13,361 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073741834_1010 (size=19991) 2024-12-02T15:26:13,361 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073741833_1009 (size=13435) 2024-12-02T15:26:13,362 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073741836_1012 (size=99973) 2024-12-02T15:26:13,362 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073741836_1012 (size=99973) 2024-12-02T15:26:13,364 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073741836_1012 (size=99973) 2024-12-02T15:26:13,365 DEBUG [RS:0;be0458f36726:43025 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/oldWALs 2024-12-02T15:26:13,366 INFO [RS:0;be0458f36726:43025 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL be0458f36726%2C43025%2C1733152773677:(num 1733152775706) 2024-12-02T15:26:13,366 DEBUG [RS:0;be0458f36726:43025 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T15:26:13,366 INFO [RS:0;be0458f36726:43025 {}] regionserver.LeaseManager(133): Closed leases 2024-12-02T15:26:13,366 DEBUG [RS:1;be0458f36726:45405 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/oldWALs 
2024-12-02T15:26:13,366 INFO [RS:1;be0458f36726:45405 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL be0458f36726%2C45405%2C1733152773787.meta:.meta(num 1733152776225) 2024-12-02T15:26:13,366 INFO [RS:0;be0458f36726:43025 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-02T15:26:13,366 INFO [RS:0;be0458f36726:43025 {}] hbase.ChoreService(370): Chore service for: regionserver/be0458f36726:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-02T15:26:13,367 INFO [RS:0;be0458f36726:43025 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-02T15:26:13,367 INFO [RS:0;be0458f36726:43025 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-02T15:26:13,367 INFO [RS:0;be0458f36726:43025 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-02T15:26:13,367 INFO [RS:0;be0458f36726:43025 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-02T15:26:13,367 DEBUG [RS:2;be0458f36726:35155 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/oldWALs 2024-12-02T15:26:13,367 INFO [RS:2;be0458f36726:35155 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL be0458f36726%2C35155%2C1733152773848:(num 1733152775711) 2024-12-02T15:26:13,367 DEBUG [RS:2;be0458f36726:35155 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T15:26:13,367 INFO [RS:2;be0458f36726:35155 {}] regionserver.LeaseManager(133): Closed leases 2024-12-02T15:26:13,367 INFO [RS:2;be0458f36726:35155 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-02T15:26:13,367 INFO [RS:0;be0458f36726:43025 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:43025 2024-12-02T15:26:13,367 INFO [RS:2;be0458f36726:35155 {}] hbase.ChoreService(370): Chore service for: regionserver/be0458f36726:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-02T15:26:13,367 INFO [RS:2;be0458f36726:35155 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-02T15:26:13,367 INFO [RS:2;be0458f36726:35155 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-02T15:26:13,367 INFO [RS:2;be0458f36726:35155 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-02T15:26:13,367 INFO [RS:2;be0458f36726:35155 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-02T15:26:13,367 INFO [RS:2;be0458f36726:35155 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:35155 2024-12-02T15:26:13,368 INFO [regionserver/be0458f36726:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-02T15:26:13,368 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073741835_1011 (size=9673) 2024-12-02T15:26:13,369 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073741835_1011 (size=9673) 2024-12-02T15:26:13,369 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073741835_1011 (size=9673) 2024-12-02T15:26:13,369 INFO [regionserver/be0458f36726:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-02T15:26:13,373 DEBUG [RS:1;be0458f36726:45405 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/oldWALs 2024-12-02T15:26:13,373 INFO [RS:1;be0458f36726:45405 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL be0458f36726%2C45405%2C1733152773787:(num 1733152775714) 2024-12-02T15:26:13,373 DEBUG [RS:1;be0458f36726:45405 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-02T15:26:13,373 INFO [RS:1;be0458f36726:45405 {}] regionserver.LeaseManager(133): Closed leases 2024-12-02T15:26:13,373 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43025-0x10197ce54a90001, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/be0458f36726,43025,1733152773677 2024-12-02T15:26:13,373 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34441-0x10197ce54a90000, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-02T15:26:13,373 INFO [RS:1;be0458f36726:45405 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-02T15:26:13,374 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35155-0x10197ce54a90003, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/be0458f36726,35155,1733152773848 2024-12-02T15:26:13,374 INFO [RS:2;be0458f36726:35155 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-02T15:26:13,374 INFO [RS:1;be0458f36726:45405 {}] hbase.ChoreService(370): Chore service for: regionserver/be0458f36726:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-02T15:26:13,374 INFO [RS:0;be0458f36726:43025 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-02T15:26:13,374 INFO [RS:1;be0458f36726:45405 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-02T15:26:13,374 INFO [regionserver/be0458f36726:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-02T15:26:13,374 INFO [RS:1;be0458f36726:45405 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:45405 2024-12-02T15:26:13,390 INFO [RS:1;be0458f36726:45405 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-02T15:26:13,390 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [be0458f36726,35155,1733152773848] 2024-12-02T15:26:13,391 DEBUG [pool-69-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45405-0x10197ce54a90002, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/be0458f36726,45405,1733152773787 2024-12-02T15:26:13,407 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/be0458f36726,35155,1733152773848 already deleted, retry=false 2024-12-02T15:26:13,407 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; be0458f36726,35155,1733152773848 expired; onlineServers=2 2024-12-02T15:26:13,407 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [be0458f36726,45405,1733152773787] 2024-12-02T15:26:13,415 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/be0458f36726,45405,1733152773787 already deleted, retry=false 2024-12-02T15:26:13,415 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; be0458f36726,45405,1733152773787 expired; onlineServers=1 2024-12-02T15:26:13,415 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [be0458f36726,43025,1733152773677] 2024-12-02T15:26:13,423 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/be0458f36726,43025,1733152773677 already deleted, retry=false 2024-12-02T15:26:13,423 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; be0458f36726,43025,1733152773677 expired; onlineServers=0 2024-12-02T15:26:13,423 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'be0458f36726,34441,1733152772730' ***** 2024-12-02T15:26:13,423 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-02T15:26:13,424 INFO [M:0;be0458f36726:34441 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-02T15:26:13,424 INFO [M:0;be0458f36726:34441 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-02T15:26:13,424 DEBUG [M:0;be0458f36726:34441 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-02T15:26:13,424 DEBUG [M:0;be0458f36726:34441 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-02T15:26:13,424 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-12-02T15:26:13,424 DEBUG [master/be0458f36726:0:becomeActiveMaster-HFileCleaner.large.0-1733152775144 {}] cleaner.HFileCleaner(306): Exit Thread[master/be0458f36726:0:becomeActiveMaster-HFileCleaner.large.0-1733152775144,5,FailOnTimeoutGroup] 2024-12-02T15:26:13,424 DEBUG [master/be0458f36726:0:becomeActiveMaster-HFileCleaner.small.0-1733152775161 {}] cleaner.HFileCleaner(306): Exit Thread[master/be0458f36726:0:becomeActiveMaster-HFileCleaner.small.0-1733152775161,5,FailOnTimeoutGroup] 2024-12-02T15:26:13,424 INFO [M:0;be0458f36726:34441 {}] hbase.ChoreService(370): Chore service for: master/be0458f36726:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-02T15:26:13,424 INFO [M:0;be0458f36726:34441 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-02T15:26:13,424 DEBUG [M:0;be0458f36726:34441 {}] master.HMaster(1795): Stopping service threads 2024-12-02T15:26:13,424 INFO [M:0;be0458f36726:34441 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-02T15:26:13,424 INFO [M:0;be0458f36726:34441 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-02T15:26:13,425 INFO [M:0;be0458f36726:34441 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-02T15:26:13,425 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-02T15:26:13,432 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34441-0x10197ce54a90000, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-02T15:26:13,432 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34441-0x10197ce54a90000, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-02T15:26:13,432 DEBUG [M:0;be0458f36726:34441 {}] zookeeper.ZKUtil(347): master:34441-0x10197ce54a90000, quorum=127.0.0.1:55609, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-02T15:26:13,432 WARN [M:0;be0458f36726:34441 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-02T15:26:13,433 INFO [M:0;be0458f36726:34441 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/.lastflushedseqids 2024-12-02T15:26:13,444 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41939 is added to blk_1073742417_1593 (size=329) 2024-12-02T15:26:13,444 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32893 is added to blk_1073742417_1593 (size=329) 2024-12-02T15:26:13,444 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45623 is added to blk_1073742417_1593 (size=329) 2024-12-02T15:26:13,445 INFO [M:0;be0458f36726:34441 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-02T15:26:13,445 INFO [M:0;be0458f36726:34441 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-02T15:26:13,445 DEBUG 
[M:0;be0458f36726:34441 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-02T15:26:13,456 INFO [M:0;be0458f36726:34441 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-02T15:26:13,456 DEBUG [M:0;be0458f36726:34441 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-02T15:26:13,456 DEBUG [M:0;be0458f36726:34441 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-02T15:26:13,456 DEBUG [M:0;be0458f36726:34441 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-02T15:26:13,456 INFO [M:0;be0458f36726:34441 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=980.38 KB heapSize=1.15 MB 2024-12-02T15:26:13,457 ERROR [AsyncFSWAL-0-hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/MasterData-prefix:be0458f36726,34441,1733152772730 {}] server.NIOServerCnxnFactory(85): Thread Thread[AsyncFSWAL-0-hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/MasterData-prefix:be0458f36726,34441,1733152772730,5,FailOnTimeoutGroup] died java.lang.NullPointerException: Cannot invoke "org.apache.hbase.thirdparty.io.netty.buffer.ByteBuf.readableBytes()" because "this.buf" is null at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.buffered(FanOutOneBlockAsyncDFSOutput.java:414) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.append(AsyncProtobufLogWriter.java:134) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.doAppend(AsyncFSWAL.java:181) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.doAppend(AsyncFSWAL.java:100) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.appendEntry(AbstractFSWAL.java:1333) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.appendAndSync(AbstractFSWAL.java:1724) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.consume(AbstractFSWAL.java:1832) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-02T15:26:13,483 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35155-0x10197ce54a90003, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-02T15:26:13,483 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43025-0x10197ce54a90001, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-02T15:26:13,483 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35155-0x10197ce54a90003, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-02T15:26:13,483 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43025-0x10197ce54a90001, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-02T15:26:13,483 INFO [RS:0;be0458f36726:43025 {}] hbase.HBaseServerBase(486): Close table descriptors
2024-12-02T15:26:13,483 INFO [RS:2;be0458f36726:35155 {}] hbase.HBaseServerBase(486): Close table descriptors
2024-12-02T15:26:13,483 INFO [RS:0;be0458f36726:43025 {}] regionserver.HRegionServer(1031): Exiting; stopping=be0458f36726,43025,1733152773677; zookeeper connection closed.
2024-12-02T15:26:13,483 INFO [RS:2;be0458f36726:35155 {}] regionserver.HRegionServer(1031): Exiting; stopping=be0458f36726,35155,1733152773848; zookeeper connection closed.
2024-12-02T15:26:13,484 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@29304011 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@29304011
2024-12-02T15:26:13,484 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@b896501 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@b896501
2024-12-02T15:26:13,498 DEBUG [pool-69-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45405-0x10197ce54a90002, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-02T15:26:13,498 INFO [RS:1;be0458f36726:45405 {}] hbase.HBaseServerBase(486): Close table descriptors
2024-12-02T15:26:13,499 DEBUG [pool-69-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45405-0x10197ce54a90002, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-02T15:26:13,499 INFO [RS:1;be0458f36726:45405 {}] regionserver.HRegionServer(1031): Exiting; stopping=be0458f36726,45405,1733152773787; zookeeper connection closed.
2024-12-02T15:26:13,499 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@1d6d1144 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@1d6d1144
2024-12-02T15:26:13,500 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 3 regionserver(s) complete
2024-12-02T15:26:18,690 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties
2024-12-02T15:26:23,093 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.RegionServer.CP_org.apache.hadoop.hbase.security.access.AccessController
2024-12-02T15:26:23,093 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint
2024-12-02T15:26:23,094 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta
2024-12-02T15:26:23,094 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testExportExpiredSnapshot
2024-12-02T15:26:23,094 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_acl
2024-12-02T15:26:23,094 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.security.access.AccessController
2024-12-02T15:26:23,094 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.security.access.SecureTestUtil$MasterSyncObserver
2024-12-02T15:26:23,095 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.security.access.AccessController
2024-12-02T15:26:28,599 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties
2024-12-02T15:26:31,782 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details.
2024-12-02T15:27:01,782 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details.
Process Thread Dump: Automatic Stack Trace every 60 seconds waiting on M:0;be0458f36726:34441 234 active threads Thread 1 (main): State: TIMED_WAITING Blocked count: 2 Waited count: 4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444) java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203) app//org.junit.internal.runners.statements.FailOnTimeout.getResult(FailOnTimeout.java:167) app//org.junit.internal.runners.statements.FailOnTimeout.evaluate(FailOnTimeout.java:128) app//org.apache.hadoop.hbase.SystemExitRule$1.evaluate(SystemExitRule.java:39) app//org.junit.rules.RunRules.evaluate(RunRules.java:20) app//org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) app//org.junit.runners.ParentRunner.run(ParentRunner.java:413) app//org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:316) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeWithRerun(JUnit4Provider.java:240) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:214) app//org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:155) app//org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:385) app//org.apache.maven.surefire.booter.ForkedBooter.execute(ForkedBooter.java:162) app//org.apache.maven.surefire.booter.ForkedBooter.run(ForkedBooter.java:507) app//org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:495) Thread 2 (Reference Handler): State: RUNNABLE Blocked count: 5 Waited count: 0 Stack: java.base@17.0.11/java.lang.ref.Reference.waitForReferencePendingList(Native Method) java.base@17.0.11/java.lang.ref.Reference.processPendingReferences(Reference.java:253) java.base@17.0.11/java.lang.ref.Reference$ReferenceHandler.run(Reference.java:215) Thread 3 (Finalizer): State: WAITING Blocked count: 77 Waited count: 18 Waiting on java.lang.ref.ReferenceQueue$Lock@2acf0975 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) java.base@17.0.11/java.lang.ref.Finalizer$FinalizerThread.run(Finalizer.java:172) Thread 4 (Signal Dispatcher): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 12 (Common-Cleaner): State: TIMED_WAITING Blocked count: 19 Waited count: 22 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/jdk.internal.ref.CleanerImpl.run(CleanerImpl.java:140) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) java.base@17.0.11/jdk.internal.misc.InnocuousThread.run(InnocuousThread.java:162) Thread 13 (Notification Thread): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 14 (pool-1-thread-1): State: RUNNABLE Blocked count: 0 Waited count: 25 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.poll(EPollPort.java:200) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:281) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 15 (pool-1-thread-2): State: WAITING Blocked count: 3 Waited count: 23 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@49707025 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:275) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 16 (surefire-forkedjvm-stream-flusher): State: TIMED_WAITING Blocked count: 0 Waited count: 4676 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 18 (surefire-forkedjvm-command-thread): State: WAITING Blocked count: 0 Waited count: 47 Waiting on java.util.concurrent.CountDownLatch$Sync@30d05d95 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.CountDownLatch.await(CountDownLatch.java:230) java.base@17.0.11/sun.nio.ch.PendingFuture.get(PendingFuture.java:178) app//org.apache.maven.surefire.api.util.internal.Channels$2.read(Channels.java:127) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) 
app//org.apache.maven.surefire.api.util.internal.Channels$3.readImpl(Channels.java:169) app//org.apache.maven.surefire.api.util.internal.AbstractNoninterruptibleReadableChannel.read(AbstractNoninterruptibleReadableChannel.java:50) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:430) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:419) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.readMessageType(AbstractStreamDecoder.java:116) app//org.apache.maven.surefire.booter.stream.CommandDecoder.decode(CommandDecoder.java:77) app//org.apache.maven.surefire.booter.spi.CommandChannelDecoder.decode(CommandChannelDecoder.java:60) app//org.apache.maven.surefire.booter.CommandReader$CommandRunnable.run(CommandReader.java:290) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 22 (Time-limited test): State: RUNNABLE Blocked count: 13257 Waited count: 13914 Stack: java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo1(Native Method) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:197) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:154) app//org.apache.hadoop.hbase.util.ReflectionUtils.printThreadInfo(ReflectionUtils.java:181) app//org.apache.hadoop.hbase.util.Threads.printThreadInfo(Threads.java:186) app//org.apache.hadoop.hbase.util.Threads.threadDumpingIsAlive(Threads.java:113) app//org.apache.hadoop.hbase.LocalHBaseCluster.join(LocalHBaseCluster.java:396) app//org.apache.hadoop.hbase.SingleProcessHBaseCluster.waitUntilShutDown(SingleProcessHBaseCluster.java:886) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1038) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) app//org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:123) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) java.base@17.0.11/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) java.base@17.0.11/java.lang.reflect.Method.invoke(Method.java:568) app//org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) app//org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) app//org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) app//org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) app//org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) Thread 23 (org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner): State: WAITING Blocked count: 13 Waited count: 14 Waiting on java.lang.ref.ReferenceQueue$Lock@441c019f Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 25 (SSL Certificates Store Monitor): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.TaskQueue@e27b061 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) 
java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 34 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@14f04a62): State: TIMED_WAITING Blocked count: 0 Waited count: 930 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 35 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 93 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 36 (pool-6-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 37 (qtp342963407-37): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f8ff842d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 38 (qtp342963407-38): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f8ff842d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 39 (qtp342963407-39): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f8ff842d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 40 (qtp342963407-40): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) 
app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f8ff842d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 41 (qtp342963407-41-acceptor-0@66a2842b-ServerConnector@33b00b71{HTTP/1.1, (http/1.1)}{localhost:45293}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 42 (qtp342963407-42): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 43 (qtp342963407-43): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 44 (qtp342963407-44): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 45 (Session-HouseKeeper-b071e18-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 46 (pool-7-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 53 (FSEditLogAsync): State: WAITING Blocked count: 34 Waited count: 3124 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@262b76c7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.dequeueEdit(FSEditLogAsync.java:241) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.run(FSEditLogAsync.java:250) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 55 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 56 (IPC Server idle connection scanner for port 36329): State: TIMED_WAITING Blocked count: 1 Waited 
count: 48 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 58 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 93 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 61 (org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor@2a178eda): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor.run(PendingReconstructionBlocks.java:267) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 62 (DatanodeAdminMonitor-0): State: TIMED_WAITING Blocked count: 0 Waited count: 156 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 49 (org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor@14179e6b): State: TIMED_WAITING Blocked count: 0 Waited count: 93 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor.run(HeartbeatManager.java:563) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 47 (RedundancyMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 157 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) java.base@17.0.11/java.lang.Thread.sleep(Thread.java:344) java.base@17.0.11/java.util.concurrent.TimeUnit.sleep(TimeUnit.java:446) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$RedundancyMonitor.run(BlockManager.java:5352) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 48 (MarkedDeleteBlockScrubberThread): State: TIMED_WAITING Blocked count: 0 Waited count: 45724 
Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$MarkedDeleteBlockScrubber.run(BlockManager.java:5326) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 51 (Block report processor): State: WAITING Blocked count: 1 Waited count: 1375 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@48708070 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.processQueue(BlockManager.java:5627) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.run(BlockManager.java:5614) Thread 57 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 54 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 64 (IPC Server handler 0 on default port 36329): State: TIMED_WAITING Blocked count: 117 Waited count: 2468 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 65 (IPC Server handler 1 on default port 36329): State: TIMED_WAITING Blocked count: 141 Waited count: 2453 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) 
app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 66 (IPC Server handler 2 on default port 36329): State: TIMED_WAITING Blocked count: 118 Waited count: 2448 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 67 (IPC Server handler 3 on default port 36329): State: TIMED_WAITING Blocked count: 126 Waited count: 2452 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 68 (IPC Server handler 4 on default port 36329): State: TIMED_WAITING Blocked count: 121 Waited count: 2490 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 69 (pool-12-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 71 (org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor@5ed1041e): State: TIMED_WAITING Blocked count: 0 Waited count: 232 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor.run(LeaseManager.java:537) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 72 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor@2129b88): State: TIMED_WAITING Blocked count: 0 Waited count: 93 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor.run(FSNamesystem.java:4550) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 73 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller@2b36a401): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller.run(FSNamesystem.java:4592) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 74 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber@68b8bdaf): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber.run(FSNamesystem.java:4689) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 75 (CacheReplicationMonitor(826874655)): State: TIMED_WAITING Blocked count: 0 Waited count: 17 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor.run(CacheReplicationMonitor.java:186) Thread 86 (pool-18-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 87 (qtp902969986-87): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) 
app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f8ff842d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 88 (qtp902969986-88-acceptor-0@348693b2-ServerConnector@5703ed84{HTTP/1.1, (http/1.1)}{localhost:42033}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 89 (qtp902969986-89): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 90 (qtp902969986-90): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 91 (Session-HouseKeeper-becdb63-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 
Thread 92 (nioEventLoopGroup-2-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 93 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@7efce26a): State: TIMED_WAITING Blocked count: 0 Waited count: 927 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 95 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 96 (IPC Server idle connection scanner for port 43321): State: TIMED_WAITING Blocked count: 1 Waited count: 48 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 98 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 93 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 101 (Command processor): State: WAITING Blocked count: 0 Waited count: 370 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@20fe8858 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 102 (BP-1550669498-172.17.0.2-1733152767991 heartbeating to localhost/127.0.0.1:36329): State: TIMED_WAITING Blocked count: 1372 Waited count: 1519 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 103 (pool-20-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 85 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@47bb227d): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 97 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) 
app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 94 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 104 (IPC Server handler 0 on default port 43321): State: TIMED_WAITING Blocked count: 0 Waited count: 465 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 105 (IPC Server handler 1 on default port 43321): State: TIMED_WAITING Blocked count: 0 Waited count: 468 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 106 (IPC Server handler 2 on default port 43321): State: TIMED_WAITING Blocked count: 0 Waited count: 466 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 107 (IPC Server handler 3 on default port 43321): State: TIMED_WAITING Blocked count: 0 Waited count: 477 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 108 (IPC Server handler 4 on default port 43321): State: TIMED_WAITING Blocked count: 0 Waited count: 468 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 120 (pool-26-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 114 (IPC Client (834363022) connection to localhost/127.0.0.1:36329 from jenkins): State: TIMED_WAITING Blocked count: 1300 Waited count: 1301 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Thread 115 (IPC Parameter Sending Thread for localhost/127.0.0.1:36329): State: TIMED_WAITING Blocked count: 0 Waited count: 2102 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 121 (qtp512551189-121): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f8ff842d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) 
app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 122 (qtp512551189-122-acceptor-0@241d394-ServerConnector@7e7a95e6{HTTP/1.1, (http/1.1)}{localhost:46597}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 123 (qtp512551189-123): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 124 (qtp512551189-124): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 125 (Session-HouseKeeper-26171ccc-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 126 (nioEventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 127 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@4ade5188): State: TIMED_WAITING Blocked count: 0 Waited count: 926 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 129 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 130 (IPC Server idle connection scanner for port 33749): State: TIMED_WAITING Blocked count: 1 Waited count: 48 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 132 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 93 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 135 (Command processor): State: WAITING Blocked count: 1 Waited count: 346 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@224032a3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 136 (BP-1550669498-172.17.0.2-1733152767991 heartbeating to localhost/127.0.0.1:36329): State: TIMED_WAITING Blocked count: 1407 Waited count: 1517 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 137 (pool-29-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 119 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@3c065f15): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 131 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 128 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 138 (IPC Server handler 0 on default port 33749): State: TIMED_WAITING Blocked count: 0 Waited count: 477 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 139 (IPC Server handler 1 on default port 33749): State: TIMED_WAITING Blocked count: 0 Waited count: 464 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 140 (IPC Server handler 2 on default port 33749): State: TIMED_WAITING Blocked count: 0 Waited count: 478 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 141 (IPC Server handler 3 on default port 33749): State: TIMED_WAITING Blocked count: 0 Waited count: 474 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 142 (IPC Server handler 4 on default port 33749): State: TIMED_WAITING Blocked count: 0 Waited count: 510 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 152 (pool-36-thread-1): State: 
TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 153 (qtp243104512-153): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f8ff842d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 154 (qtp243104512-154-acceptor-0@dc51bb0-ServerConnector@295c990e{HTTP/1.1, (http/1.1)}{localhost:35927}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 155 (qtp243104512-155): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 156 (qtp243104512-156): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 157 (Session-HouseKeeper-43a15b30-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 158 (nioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 159 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@6412af41): State: TIMED_WAITING Blocked count: 0 Waited count: 926 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 161 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: 
java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 162 (IPC Server idle connection scanner for port 36271): State: TIMED_WAITING Blocked count: 1 Waited count: 48 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 164 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 93 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 167 (Command processor): State: WAITING Blocked count: 3 Waited count: 340 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2e0f7dfe Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 168 (BP-1550669498-172.17.0.2-1733152767991 heartbeating to localhost/127.0.0.1:36329): State: TIMED_WAITING Blocked count: 1329 Waited count: 1503 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 169 (pool-38-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 151 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@686bb815): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 163 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 160 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 170 (IPC Server handler 0 on default port 36271): State: TIMED_WAITING Blocked count: 0 Waited count: 468 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 171 (IPC Server handler 1 on default port 36271): State: TIMED_WAITING Blocked count: 0 Waited count: 477 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 172 (IPC Server handler 2 on default port 36271): State: TIMED_WAITING Blocked count: 0 Waited count: 474 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 173 (IPC Server handler 3 on default port 36271): State: TIMED_WAITING Blocked count: 0 Waited count: 477 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 174 (IPC Server handler 4 on default port 36271): State: TIMED_WAITING Blocked count: 0 Waited count: 473 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 185 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/44e720fa-d04f-d823-6564-55fab94a4e60/cluster_91cb4ebe-02bf-547b-91b0-e80db118e0cc/data/data1)): State: TIMED_WAITING Blocked count: 24 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 186 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/44e720fa-d04f-d823-6564-55fab94a4e60/cluster_91cb4ebe-02bf-547b-91b0-e80db118e0cc/data/data2)): State: TIMED_WAITING Blocked count: 29 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 187 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/44e720fa-d04f-d823-6564-55fab94a4e60/cluster_91cb4ebe-02bf-547b-91b0-e80db118e0cc/data/data3)): State: TIMED_WAITING Blocked count: 27 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) 
app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 190 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/44e720fa-d04f-d823-6564-55fab94a4e60/cluster_91cb4ebe-02bf-547b-91b0-e80db118e0cc/data/data4)): State: TIMED_WAITING Blocked count: 29 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 199 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/44e720fa-d04f-d823-6564-55fab94a4e60/cluster_91cb4ebe-02bf-547b-91b0-e80db118e0cc/data/data4/current/BP-1550669498-172.17.0.2-1733152767991): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 202 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/44e720fa-d04f-d823-6564-55fab94a4e60/cluster_91cb4ebe-02bf-547b-91b0-e80db118e0cc/data/data1/current/BP-1550669498-172.17.0.2-1733152767991): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 201 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/44e720fa-d04f-d823-6564-55fab94a4e60/cluster_91cb4ebe-02bf-547b-91b0-e80db118e0cc/data/data3/current/BP-1550669498-172.17.0.2-1733152767991): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 198 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/44e720fa-d04f-d823-6564-55fab94a4e60/cluster_91cb4ebe-02bf-547b-91b0-e80db118e0cc/data/data2/current/BP-1550669498-172.17.0.2-1733152767991): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 216 (pool-15-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 217 (pool-23-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 220 (java.util.concurrent.ThreadPoolExecutor$Worker@10cbcc63[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 223 (java.util.concurrent.ThreadPoolExecutor$Worker@7f752be8[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 224 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/44e720fa-d04f-d823-6564-55fab94a4e60/cluster_91cb4ebe-02bf-547b-91b0-e80db118e0cc/data/data5)): State: TIMED_WAITING Blocked count: 2 Waited count: 2 Stack: 
java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 225 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/44e720fa-d04f-d823-6564-55fab94a4e60/cluster_91cb4ebe-02bf-547b-91b0-e80db118e0cc/data/data6)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 229 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/44e720fa-d04f-d823-6564-55fab94a4e60/cluster_91cb4ebe-02bf-547b-91b0-e80db118e0cc/data/data5/current/BP-1550669498-172.17.0.2-1733152767991): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 231 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/44e720fa-d04f-d823-6564-55fab94a4e60/cluster_91cb4ebe-02bf-547b-91b0-e80db118e0cc/data/data6/current/BP-1550669498-172.17.0.2-1733152767991): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 235 (pool-33-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 238 (java.util.concurrent.ThreadPoolExecutor$Worker@fcd4fa5[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 239 (FsDatasetAsyncDiskServiceFixer): State: TIMED_WAITING Blocked count: 0 Waited count: 16 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer.run(HBaseTestingUtil.java:576) Thread 241 (NIOServerCxnFactory.SelectorThread-0): State: RUNNABLE Blocked count: 2 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 242 (NIOServerCxnFactory.SelectorThread-1): State: RUNNABLE Blocked count: 4 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 243 (NIOServerCxnFactory.AcceptThread:localhost/127.0.0.1:55609): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.select(NIOServerCnxnFactory.java:205) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.run(NIOServerCnxnFactory.java:181) Thread 240 (ConnnectionExpirer): State: TIMED_WAITING Blocked count: 0 Waited count: 48 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.NIOServerCnxnFactory$ConnectionExpirerThread.run(NIOServerCnxnFactory.java:554) Thread 244 (SessionTracker): State: TIMED_WAITING Blocked count: 0 Waited count: 232 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Thread 245 (SyncThread:0): State: WAITING Blocked count: 14 Waited count: 367 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4805d854 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.SyncRequestProcessor.run(SyncRequestProcessor.java:170) Thread 246 (ProcessThread(sid:0 cport:55609):): State: WAITING Blocked count: 4 Waited count: 454 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2f293479 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.PrepRequestProcessor.run(PrepRequestProcessor.java:142) Thread 247 (RequestThrottler): State: WAITING Blocked count: 0 Waited count: 495 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7ae703f1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.RequestThrottler.run(RequestThrottler.java:147) Thread 248 (NIOWorkerThread-1): State: WAITING Blocked count: 4 Waited count: 95 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4f4c61e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 250 (LeaseRenewer:jenkins@localhost:36329): State: TIMED_WAITING Blocked count: 13 Waited count: 481 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) 
app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 259 (weak-ref-cleaner-strictcontextstorage): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.lang.ref.ReferenceQueue$Lock@62d579ac Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 260 (HBase-Metrics2-1): State: TIMED_WAITING Blocked count: 0 Waited count: 327 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 261 (HMaster-EventLoopGroup-1-1): State: RUNNABLE Blocked count: 30 Waited count: 2 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 262 (Time-limited test-SendThread(127.0.0.1:55609)): State: RUNNABLE Blocked count: 10 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289) Thread 263 (Time-limited test-EventThread): State: WAITING Blocked count: 23 Waited count: 55 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4603708e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native 
Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 264 (NIOWorkerThread-2): State: WAITING Blocked count: 7 Waited count: 96 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4f4c61e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 265 (NIOWorkerThread-3): State: WAITING Blocked count: 4 Waited count: 95 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4f4c61e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 266 (NIOWorkerThread-4): State: WAITING Blocked count: 6 Waited count: 95 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4f4c61e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 267 (zk-event-processor-pool-0): State: WAITING Blocked count: 25 Waited count: 70 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@64658a66 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 268 (NIOWorkerThread-5): State: WAITING Blocked count: 4 Waited count: 95 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4f4c61e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 269 (NIOWorkerThread-6): State: WAITING Blocked count: 7 Waited count: 96 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4f4c61e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 270 (NIOWorkerThread-7): State: WAITING Blocked count: 5 Waited count: 95 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4f4c61e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 271 (NIOWorkerThread-8): State: WAITING Blocked count: 5 Waited count: 96 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4f4c61e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 272 (NIOWorkerThread-9): State: WAITING Blocked count: 11 Waited count: 95 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4f4c61e 
Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 273 (NIOWorkerThread-10): State: WAITING Blocked count: 4 Waited count: 96 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4f4c61e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 274 (NIOWorkerThread-11): State: WAITING Blocked count: 3 Waited count: 94 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4f4c61e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 275 (NIOWorkerThread-12): State: WAITING Blocked count: 5 Waited count: 95 Waiting on 
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4f4c61e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 276 (NIOWorkerThread-13): State: WAITING Blocked count: 0 Waited count: 94 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4f4c61e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 277 (NIOWorkerThread-14): State: WAITING Blocked count: 6 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4f4c61e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 278 
(NIOWorkerThread-15): State: WAITING Blocked count: 5 Waited count: 95 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4f4c61e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 279 (NIOWorkerThread-16): State: WAITING Blocked count: 3 Waited count: 95 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4f4c61e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 281 (RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34441): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@2d34975f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 282 (RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441): State: WAITING Blocked count: 186 Waited count: 655 Waiting on java.util.concurrent.Semaphore$NonfairSync@61daab01 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 283 (RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441): State: WAITING Blocked count: 61 Waited count: 347 Waiting on java.util.concurrent.Semaphore$NonfairSync@3c0030b4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 284 (RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34441): State: WAITING Blocked count: 38 Waited count: 10243 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@74feabe8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 285 (RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34441): State: WAITING Blocked count: 1 Waited count: 4 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7d794447 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 286 
(RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34441): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7d794447 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 287 (RpcServer.replication.FPBQ.Fifo.handler=0,queue=0,port=34441): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@49cf6a4a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 288 (RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=34441): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@74f77657 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 289 (RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=34441): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@75d722ca Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 290 
(RpcServer.metaPriority.FPBQ.Fifo.handler=0,queue=0,port=34441): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.Semaphore$NonfairSync@423b0b4b Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 293 (Time-limited test.named-queue-events-pool-0): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7094b47c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 294 (MiniHBaseClusterRegionServer-EventLoopGroup-3-1): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 316 (MiniHBaseClusterRegionServer-EventLoopGroup-4-1): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 338 (MiniHBaseClusterRegionServer-EventLoopGroup-5-1): State: RUNNABLE Blocked count: 98 Waited count: 3 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 291 (M:0;be0458f36726:34441): State: TIMED_WAITING Blocked count: 12 Waited count: 4002 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hbase.regionserver.wal.SyncFuture.get(SyncFuture.java:169) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:1029) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.doSync(AbstractFSWAL.java:1940) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$sync$2(AbstractFSWAL.java:723) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$1112/0x00007f8ff8f8bd80.run(Unknown Source) app//org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:723) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:713) app//org.apache.hadoop.hbase.regionserver.HRegion.doSyncOfUnflushedWALChanges(HRegion.java:2935) app//org.apache.hadoop.hbase.regionserver.HRegion.internalPrepareFlushCache(HRegion.java:2876) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2735) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2700) app//org.apache.hadoop.hbase.regionserver.HRegion.doClose(HRegion.java:1862) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1672) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1627) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1610) Thread 361 (Monitor thread for TaskMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 46 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native 
Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 363 (master/be0458f36726:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 365 (master/be0458f36726:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 367 (org.apache.hadoop.hdfs.PeerCache@3ce5434c): State: TIMED_WAITING Blocked count: 0 Waited count: 154 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 386 (master:store-WAL-Roller): State: TIMED_WAITING Blocked count: 0 Waited count: 4586 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:180) Thread 403 (MiniHBaseClusterRegionServer-EventLoopGroup-5-2): State: RUNNABLE Blocked count: 113 Waited count: 3 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 404 (MiniHBaseClusterRegionServer-EventLoopGroup-5-3): State: RUNNABLE Blocked count: 114 Waited count: 2 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 417 (Idle-Rpc-Conn-Sweeper-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 153 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 428 (SnapshotHandlerChoreCleaner): State: TIMED_WAITING Blocked count: 0 Waited count: 46 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 416 (RpcClient-timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 45799 Stack: 
java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 437 (HMaster-EventLoopGroup-1-2): State: RUNNABLE Blocked count: 32 Waited count: 2 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 439 (HMaster-EventLoopGroup-1-3): State: RUNNABLE Blocked count: 22 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 463 (RegionServerTracker-0): State: WAITING Blocked count: 7 Waited count: 9 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1d040daf Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 486 
(regionserver/be0458f36726:0.procedureResultReporter): State: WAITING Blocked count: 13 Waited count: 21 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@310e31d0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 487 (regionserver/be0458f36726:0.procedureResultReporter): State: WAITING Blocked count: 17 Waited count: 35 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5b50dcfd Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 485 (regionserver/be0458f36726:0.procedureResultReporter): State: WAITING Blocked count: 19 Waited count: 39 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7186b93 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 510 (LeaseRenewer:jenkins.hfs.2@localhost:36329): State: TIMED_WAITING Blocked count: 14 Waited count: 481 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 515 
(LeaseRenewer:jenkins.hfs.0@localhost:36329): State: TIMED_WAITING Blocked count: 14 Waited count: 481 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 518 (LeaseRenewer:jenkins.hfs.1@localhost:36329): State: TIMED_WAITING Blocked count: 13 Waited count: 482 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 522 (MiniHBaseClusterRegionServer-EventLoopGroup-4-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 532 (MiniHBaseClusterRegionServer-EventLoopGroup-4-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 533 (region-location-0): State: WAITING Blocked count: 12 Waited count: 19 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3acc77aa Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 537 (RPCClient-NioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 538 (RPCClient-NioEventLoopGroup-6-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 415 (Async-Client-Retry-Timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 45642 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 541 
(RPCClient-NioEventLoopGroup-6-3): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 565 (ForkJoinPool.commonPool-worker-1): State: WAITING Blocked count: 0 Waited count: 490 Waiting on java.util.concurrent.ForkJoinPool@7a8c5192 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 566 (ForkJoinPool.commonPool-worker-2): State: TIMED_WAITING Blocked count: 0 Waited count: 708 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 581 (MiniHBaseClusterRegionServer-EventLoopGroup-3-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 594 (region-location-1): State: WAITING Blocked count: 2 Waited count: 6 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3acc77aa Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 595 (region-location-2): State: WAITING Blocked count: 2 Waited count: 6 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3acc77aa Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1001 (MutableQuantiles-0): State: TIMED_WAITING Blocked count: 0 Waited count: 871 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1057 (RPCClient-NioEventLoopGroup-6-4): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native 
Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1097 (MiniHBaseClusterRegionServer-EventLoopGroup-3-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1098 (zk-permission-watcher-pool-0): State: WAITING Blocked count: 78 Waited count: 115 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@63e66e86 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1145 (RPCClient-NioEventLoopGroup-6-5): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1206 (RPCClient-NioEventLoopGroup-6-6): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1207 (RPCClient-NioEventLoopGroup-6-7): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1208 (RPCClient-NioEventLoopGroup-6-8): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1259 (RPCClient-NioEventLoopGroup-6-9): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1260 (RPCClient-NioEventLoopGroup-6-10): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1261 (RPCClient-NioEventLoopGroup-6-11): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1263 (RPCClient-NioEventLoopGroup-6-12): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1264 (RPCClient-NioEventLoopGroup-6-13): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1621 (Container metrics unregistration): State: WAITING Blocked count: 11 Waited count: 48 Waiting on java.util.TaskQueue@3a4e2477 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 1852 (region-location-3): State: WAITING Blocked count: 2 Waited count: 7 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3acc77aa Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1853 (region-location-4): State: WAITING Blocked count: 2 Waited count: 7 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3acc77aa Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2029 (RPCClient-NioEventLoopGroup-6-14): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3464 (ForkJoinPool.commonPool-worker-5): State: WAITING Blocked count: 0 Waited count: 388 Waiting on java.util.concurrent.ForkJoinPool@7a8c5192 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 8327 (ForkJoinPool.commonPool-worker-6): State: WAITING Blocked count: 0 Waited count: 244 Waiting on java.util.concurrent.ForkJoinPool@7a8c5192 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 10119 (AsyncFSWAL-1-hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/MasterData-prefix:be0458f36726,34441,1733152772730): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2256e26d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 10122 (Timer for 'JobHistoryServer' metrics system): State: TIMED_WAITING Blocked count: 0 Waited count: 5 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) 2024-12-02T15:27:31,783 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-02T15:28:01,783 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
Process Thread Dump: Automatic Stack Trace every 60 seconds waiting on M:0;be0458f36726:34441 229 active threads Thread 1 (main): State: TIMED_WAITING Blocked count: 2 Waited count: 4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444) java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203) app//org.junit.internal.runners.statements.FailOnTimeout.getResult(FailOnTimeout.java:167) app//org.junit.internal.runners.statements.FailOnTimeout.evaluate(FailOnTimeout.java:128) app//org.apache.hadoop.hbase.SystemExitRule$1.evaluate(SystemExitRule.java:39) app//org.junit.rules.RunRules.evaluate(RunRules.java:20) app//org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) app//org.junit.runners.ParentRunner.run(ParentRunner.java:413) app//org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:316) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeWithRerun(JUnit4Provider.java:240) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:214) app//org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:155) app//org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:385) app//org.apache.maven.surefire.booter.ForkedBooter.execute(ForkedBooter.java:162) app//org.apache.maven.surefire.booter.ForkedBooter.run(ForkedBooter.java:507) app//org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:495) Thread 2 (Reference Handler): State: RUNNABLE Blocked count: 5 Waited count: 0 Stack: java.base@17.0.11/java.lang.ref.Reference.waitForReferencePendingList(Native Method) java.base@17.0.11/java.lang.ref.Reference.processPendingReferences(Reference.java:253) java.base@17.0.11/java.lang.ref.Reference$ReferenceHandler.run(Reference.java:215) Thread 3 (Finalizer): State: WAITING Blocked count: 77 Waited count: 18 Waiting on java.lang.ref.ReferenceQueue$Lock@2acf0975 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) java.base@17.0.11/java.lang.ref.Finalizer$FinalizerThread.run(Finalizer.java:172) Thread 4 (Signal Dispatcher): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 12 (Common-Cleaner): State: TIMED_WAITING Blocked count: 19 Waited count: 23 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/jdk.internal.ref.CleanerImpl.run(CleanerImpl.java:140) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) java.base@17.0.11/jdk.internal.misc.InnocuousThread.run(InnocuousThread.java:162) Thread 13 (Notification Thread): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 14 (pool-1-thread-1): State: RUNNABLE Blocked count: 0 Waited count: 28 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.poll(EPollPort.java:200) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:281) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 15 (pool-1-thread-2): State: WAITING Blocked count: 3 Waited count: 26 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@49707025 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:275) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 16 (surefire-forkedjvm-stream-flusher): State: TIMED_WAITING Blocked count: 0 Waited count: 5276 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 18 (surefire-forkedjvm-command-thread): State: WAITING Blocked count: 0 Waited count: 53 Waiting on java.util.concurrent.CountDownLatch$Sync@638b2f01 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.CountDownLatch.await(CountDownLatch.java:230) java.base@17.0.11/sun.nio.ch.PendingFuture.get(PendingFuture.java:178) app//org.apache.maven.surefire.api.util.internal.Channels$2.read(Channels.java:127) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) 
app//org.apache.maven.surefire.api.util.internal.Channels$3.readImpl(Channels.java:169) app//org.apache.maven.surefire.api.util.internal.AbstractNoninterruptibleReadableChannel.read(AbstractNoninterruptibleReadableChannel.java:50) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:430) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:419) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.readMessageType(AbstractStreamDecoder.java:116) app//org.apache.maven.surefire.booter.stream.CommandDecoder.decode(CommandDecoder.java:77) app//org.apache.maven.surefire.booter.spi.CommandChannelDecoder.decode(CommandChannelDecoder.java:60) app//org.apache.maven.surefire.booter.CommandReader$CommandRunnable.run(CommandReader.java:290) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 22 (Time-limited test): State: RUNNABLE Blocked count: 13257 Waited count: 13915 Stack: java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo1(Native Method) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:197) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:154) app//org.apache.hadoop.hbase.util.ReflectionUtils.printThreadInfo(ReflectionUtils.java:181) app//org.apache.hadoop.hbase.util.Threads.printThreadInfo(Threads.java:186) app//org.apache.hadoop.hbase.util.Threads.threadDumpingIsAlive(Threads.java:113) app//org.apache.hadoop.hbase.LocalHBaseCluster.join(LocalHBaseCluster.java:396) app//org.apache.hadoop.hbase.SingleProcessHBaseCluster.waitUntilShutDown(SingleProcessHBaseCluster.java:886) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1038) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) app//org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:123) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) java.base@17.0.11/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) java.base@17.0.11/java.lang.reflect.Method.invoke(Method.java:568) app//org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) app//org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) app//org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) app//org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) app//org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) Thread 23 (org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner): State: WAITING Blocked count: 13 Waited count: 14 Waiting on java.lang.ref.ReferenceQueue$Lock@441c019f Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 25 (SSL Certificates Store Monitor): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.TaskQueue@e27b061 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) 
java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 34 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@14f04a62): State: TIMED_WAITING Blocked count: 0 Waited count: 1050 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 35 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 105 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 36 (pool-6-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 37 (qtp342963407-37): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f8ff842d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 38 (qtp342963407-38): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f8ff842d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 39 (qtp342963407-39): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f8ff842d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 40 (qtp342963407-40): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) 
app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f8ff842d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 41 (qtp342963407-41-acceptor-0@66a2842b-ServerConnector@33b00b71{HTTP/1.1, (http/1.1)}{localhost:45293}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 42 (qtp342963407-42): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 43 (qtp342963407-43): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 44 (qtp342963407-44): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 45 (Session-HouseKeeper-b071e18-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 46 (pool-7-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 53 (FSEditLogAsync): State: WAITING Blocked count: 34 Waited count: 3124 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@262b76c7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.dequeueEdit(FSEditLogAsync.java:241) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.run(FSEditLogAsync.java:250) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 55 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 56 (IPC Server idle connection scanner for port 36329): State: TIMED_WAITING Blocked count: 1 Waited 
count: 54 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 58 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 105 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 61 (org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor@2a178eda): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor.run(PendingReconstructionBlocks.java:267) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 62 (DatanodeAdminMonitor-0): State: TIMED_WAITING Blocked count: 0 Waited count: 176 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 49 (org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor@14179e6b): State: TIMED_WAITING Blocked count: 0 Waited count: 105 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor.run(HeartbeatManager.java:563) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 47 (RedundancyMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 177 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) java.base@17.0.11/java.lang.Thread.sleep(Thread.java:344) java.base@17.0.11/java.util.concurrent.TimeUnit.sleep(TimeUnit.java:446) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$RedundancyMonitor.run(BlockManager.java:5352) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 48 (MarkedDeleteBlockScrubberThread): State: TIMED_WAITING Blocked count: 0 Waited count: 51644 
Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$MarkedDeleteBlockScrubber.run(BlockManager.java:5326) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 51 (Block report processor): State: WAITING Blocked count: 1 Waited count: 1375 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@48708070 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.processQueue(BlockManager.java:5627) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.run(BlockManager.java:5614) Thread 57 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 54 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 64 (IPC Server handler 0 on default port 36329): State: TIMED_WAITING Blocked count: 117 Waited count: 2528 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 65 (IPC Server handler 1 on default port 36329): State: TIMED_WAITING Blocked count: 141 Waited count: 2513 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) 
app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 66 (IPC Server handler 2 on default port 36329): State: TIMED_WAITING Blocked count: 118 Waited count: 2508 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 67 (IPC Server handler 3 on default port 36329): State: TIMED_WAITING Blocked count: 126 Waited count: 2512 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 68 (IPC Server handler 4 on default port 36329): State: TIMED_WAITING Blocked count: 121 Waited count: 2550 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 69 (pool-12-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 71 (org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor@5ed1041e): State: TIMED_WAITING Blocked count: 0 Waited count: 262 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor.run(LeaseManager.java:537) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 72 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor@2129b88): State: TIMED_WAITING Blocked count: 0 Waited count: 105 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor.run(FSNamesystem.java:4550) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 73 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller@2b36a401): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller.run(FSNamesystem.java:4592) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 74 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber@68b8bdaf): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber.run(FSNamesystem.java:4689) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 75 (CacheReplicationMonitor(826874655)): State: TIMED_WAITING Blocked count: 0 Waited count: 19 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor.run(CacheReplicationMonitor.java:186) Thread 86 (pool-18-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 87 (qtp902969986-87): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) 
app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f8ff842d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 88 (qtp902969986-88-acceptor-0@348693b2-ServerConnector@5703ed84{HTTP/1.1, (http/1.1)}{localhost:42033}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 89 (qtp902969986-89): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 90 (qtp902969986-90): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 91 (Session-HouseKeeper-becdb63-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 
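The per-thread records in this dump (thread id and name, State, Blocked count, Waited count, Stack) correspond to the counters exposed by java.lang.management.ThreadMXBean/ThreadInfo. Below is a minimal, illustrative sketch of how such a dump can be produced with the standard JDK API; it is not the test harness's actual dumper, and the class name DumpThreads and the exact formatting are assumptions — only the ThreadMXBean/ThreadInfo calls themselves are standard.

    import java.lang.management.ManagementFactory;
    import java.lang.management.ThreadInfo;
    import java.lang.management.ThreadMXBean;

    // Illustrative sketch (assumed class name): print one record per live thread,
    // mirroring the "State / Blocked count / Waited count / Stack" layout above.
    public class DumpThreads {
        public static void main(String[] args) {
            ThreadMXBean mx = ManagementFactory.getThreadMXBean();
            // Request locked-monitor and locked-synchronizer info where the JVM supports it.
            ThreadInfo[] infos = mx.dumpAllThreads(true, true);
            for (ThreadInfo info : infos) {
                System.out.printf("Thread %d (%s):%n", info.getThreadId(), info.getThreadName());
                System.out.printf("  State: %s%n", info.getThreadState());
                System.out.printf("  Blocked count: %d%n", info.getBlockedCount());
                System.out.printf("  Waited count: %d%n", info.getWaitedCount());
                System.out.println("  Stack:");
                for (StackTraceElement frame : info.getStackTrace()) {
                    System.out.println("    " + frame);
                }
            }
        }
    }

As the surrounding entries show, idle pool and IPC handler threads report TIMED_WAITING because they park in timed waits (LinkedBlockingQueue.poll, ScheduledThreadPoolExecutor$DelayedWorkQueue.take via LockSupport.parkNanos), while selector and acceptor threads report RUNNABLE even though they are blocked in native EPoll.wait or Net.accept calls.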
Thread 92 (nioEventLoopGroup-2-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 93 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@7efce26a): State: TIMED_WAITING Blocked count: 0 Waited count: 1047 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 95 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 96 (IPC Server idle connection scanner for port 43321): State: TIMED_WAITING Blocked count: 1 Waited count: 54 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 98 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 105 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 101 (Command processor): State: WAITING Blocked count: 0 Waited count: 390 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@20fe8858 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 102 (BP-1550669498-172.17.0.2-1733152767991 heartbeating to localhost/127.0.0.1:36329): State: TIMED_WAITING Blocked count: 1392 Waited count: 1559 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 103 (pool-20-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 85 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@47bb227d): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 97 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) 
app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 94 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 104 (IPC Server handler 0 on default port 43321): State: TIMED_WAITING Blocked count: 0 Waited count: 525 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 105 (IPC Server handler 1 on default port 43321): State: TIMED_WAITING Blocked count: 0 Waited count: 528 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 106 (IPC Server handler 2 on default port 43321): State: TIMED_WAITING Blocked count: 0 Waited count: 526 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 107 (IPC Server handler 3 on default port 43321): State: TIMED_WAITING Blocked count: 0 Waited count: 537 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 108 (IPC Server handler 4 on default port 43321): State: TIMED_WAITING Blocked count: 0 Waited count: 528 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 120 (pool-26-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 114 (IPC Client (834363022) connection to localhost/127.0.0.1:36329 from jenkins): State: TIMED_WAITING Blocked count: 1356 Waited count: 1357 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Thread 115 (IPC Parameter Sending Thread for localhost/127.0.0.1:36329): State: TIMED_WAITING Blocked count: 0 Waited count: 2162 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 121 (qtp512551189-121): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f8ff842d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) 
app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 122 (qtp512551189-122-acceptor-0@241d394-ServerConnector@7e7a95e6{HTTP/1.1, (http/1.1)}{localhost:46597}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 123 (qtp512551189-123): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 124 (qtp512551189-124): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 125 (Session-HouseKeeper-26171ccc-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 126 (nioEventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 127 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@4ade5188): State: TIMED_WAITING Blocked count: 0 Waited count: 1046 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 129 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 130 (IPC Server idle connection scanner for port 33749): State: TIMED_WAITING Blocked count: 1 Waited count: 54 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 132 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 105 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 135 (Command processor): State: WAITING Blocked count: 1 Waited count: 366 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@224032a3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 136 (BP-1550669498-172.17.0.2-1733152767991 heartbeating to localhost/127.0.0.1:36329): State: TIMED_WAITING Blocked count: 1427 Waited count: 1557 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 137 (pool-29-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 119 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@3c065f15): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 131 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 128 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 138 (IPC Server handler 0 on default port 33749): State: TIMED_WAITING Blocked count: 0 Waited count: 537 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 139 (IPC Server handler 1 on default port 33749): State: TIMED_WAITING Blocked count: 0 Waited count: 524 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 140 (IPC Server handler 2 on default port 33749): State: TIMED_WAITING Blocked count: 0 Waited count: 538 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 141 (IPC Server handler 3 on default port 33749): State: TIMED_WAITING Blocked count: 0 Waited count: 548 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 142 (IPC Server handler 4 on default port 33749): State: TIMED_WAITING Blocked count: 0 Waited count: 611 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 152 (pool-36-thread-1): State: 
TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 153 (qtp243104512-153): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f8ff842d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 154 (qtp243104512-154-acceptor-0@dc51bb0-ServerConnector@295c990e{HTTP/1.1, (http/1.1)}{localhost:35927}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 155 (qtp243104512-155): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 156 (qtp243104512-156): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 157 (Session-HouseKeeper-43a15b30-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 158 (nioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 159 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@6412af41): State: TIMED_WAITING Blocked count: 0 Waited count: 1046 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 161 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: 
java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 162 (IPC Server idle connection scanner for port 36271): State: TIMED_WAITING Blocked count: 1 Waited count: 54 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 164 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 105 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 167 (Command processor): State: WAITING Blocked count: 3 Waited count: 360 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2e0f7dfe Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 168 (BP-1550669498-172.17.0.2-1733152767991 heartbeating to localhost/127.0.0.1:36329): State: TIMED_WAITING Blocked count: 1349 Waited count: 1543 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 169 (pool-38-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 151 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@686bb815): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 163 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 160 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 170 (IPC Server handler 0 on default port 36271): State: TIMED_WAITING Blocked count: 0 Waited count: 532 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 171 (IPC Server handler 1 on default port 36271): State: TIMED_WAITING Blocked count: 0 Waited count: 561 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 172 (IPC Server handler 2 on default port 36271): State: TIMED_WAITING Blocked count: 0 Waited count: 534 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 173 (IPC Server handler 3 on default port 36271): State: TIMED_WAITING Blocked count: 0 Waited count: 537 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 174 (IPC Server handler 4 on default port 36271): State: TIMED_WAITING Blocked count: 0 Waited count: 533 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 185 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/44e720fa-d04f-d823-6564-55fab94a4e60/cluster_91cb4ebe-02bf-547b-91b0-e80db118e0cc/data/data1)): State: TIMED_WAITING Blocked count: 24 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 186 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/44e720fa-d04f-d823-6564-55fab94a4e60/cluster_91cb4ebe-02bf-547b-91b0-e80db118e0cc/data/data2)): State: TIMED_WAITING Blocked count: 29 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 187 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/44e720fa-d04f-d823-6564-55fab94a4e60/cluster_91cb4ebe-02bf-547b-91b0-e80db118e0cc/data/data3)): State: TIMED_WAITING Blocked count: 27 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) 
app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 190 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/44e720fa-d04f-d823-6564-55fab94a4e60/cluster_91cb4ebe-02bf-547b-91b0-e80db118e0cc/data/data4)): State: TIMED_WAITING Blocked count: 29 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 199 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/44e720fa-d04f-d823-6564-55fab94a4e60/cluster_91cb4ebe-02bf-547b-91b0-e80db118e0cc/data/data4/current/BP-1550669498-172.17.0.2-1733152767991): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 202 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/44e720fa-d04f-d823-6564-55fab94a4e60/cluster_91cb4ebe-02bf-547b-91b0-e80db118e0cc/data/data1/current/BP-1550669498-172.17.0.2-1733152767991): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 201 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/44e720fa-d04f-d823-6564-55fab94a4e60/cluster_91cb4ebe-02bf-547b-91b0-e80db118e0cc/data/data3/current/BP-1550669498-172.17.0.2-1733152767991): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 198 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/44e720fa-d04f-d823-6564-55fab94a4e60/cluster_91cb4ebe-02bf-547b-91b0-e80db118e0cc/data/data2/current/BP-1550669498-172.17.0.2-1733152767991): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 216 (pool-15-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 217 (pool-23-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 220 (java.util.concurrent.ThreadPoolExecutor$Worker@10cbcc63[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 223 (java.util.concurrent.ThreadPoolExecutor$Worker@7f752be8[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 224 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/44e720fa-d04f-d823-6564-55fab94a4e60/cluster_91cb4ebe-02bf-547b-91b0-e80db118e0cc/data/data5)): State: TIMED_WAITING Blocked count: 2 Waited count: 2 Stack: 
java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 225 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/44e720fa-d04f-d823-6564-55fab94a4e60/cluster_91cb4ebe-02bf-547b-91b0-e80db118e0cc/data/data6)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 229 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/44e720fa-d04f-d823-6564-55fab94a4e60/cluster_91cb4ebe-02bf-547b-91b0-e80db118e0cc/data/data5/current/BP-1550669498-172.17.0.2-1733152767991): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 231 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/44e720fa-d04f-d823-6564-55fab94a4e60/cluster_91cb4ebe-02bf-547b-91b0-e80db118e0cc/data/data6/current/BP-1550669498-172.17.0.2-1733152767991): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 235 (pool-33-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 238 (java.util.concurrent.ThreadPoolExecutor$Worker@fcd4fa5[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 239 (FsDatasetAsyncDiskServiceFixer): State: TIMED_WAITING Blocked count: 0 Waited count: 18 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer.run(HBaseTestingUtil.java:576) Thread 241 (NIOServerCxnFactory.SelectorThread-0): State: RUNNABLE Blocked count: 2 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 242 (NIOServerCxnFactory.SelectorThread-1): State: RUNNABLE Blocked count: 4 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 243 (NIOServerCxnFactory.AcceptThread:localhost/127.0.0.1:55609): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.select(NIOServerCnxnFactory.java:205) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.run(NIOServerCnxnFactory.java:181) Thread 240 (ConnnectionExpirer): State: TIMED_WAITING Blocked count: 0 Waited count: 54 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.NIOServerCnxnFactory$ConnectionExpirerThread.run(NIOServerCnxnFactory.java:554) Thread 244 (SessionTracker): State: TIMED_WAITING Blocked count: 0 Waited count: 262 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Thread 245 (SyncThread:0): State: WAITING Blocked count: 14 Waited count: 372 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4805d854 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.SyncRequestProcessor.run(SyncRequestProcessor.java:170) Thread 246 (ProcessThread(sid:0 cport:55609):): State: WAITING Blocked count: 4 Waited count: 459 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2f293479 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.PrepRequestProcessor.run(PrepRequestProcessor.java:142) Thread 247 (RequestThrottler): State: WAITING Blocked count: 0 Waited count: 500 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7ae703f1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.RequestThrottler.run(RequestThrottler.java:147) Thread 248 (NIOWorkerThread-1): State: WAITING Blocked count: 4 Waited count: 96 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4f4c61e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 259 (weak-ref-cleaner-strictcontextstorage): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.lang.ref.ReferenceQueue$Lock@62d579ac Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) 
java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 260 (HBase-Metrics2-1): State: TIMED_WAITING Blocked count: 0 Waited count: 355 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 261 (HMaster-EventLoopGroup-1-1): State: RUNNABLE Blocked count: 30 Waited count: 2 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 262 (Time-limited test-SendThread(127.0.0.1:55609)): State: RUNNABLE Blocked count: 10 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289) Thread 263 (Time-limited test-EventThread): State: WAITING Blocked count: 23 Waited count: 55 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4603708e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 264 (NIOWorkerThread-2): State: WAITING Blocked count: 7 Waited count: 96 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4f4c61e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 265 (NIOWorkerThread-3): State: WAITING Blocked count: 4 Waited count: 96 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4f4c61e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 266 (NIOWorkerThread-4): State: WAITING Blocked count: 6 Waited count: 96 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4f4c61e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 267 (zk-event-processor-pool-0): State: WAITING Blocked count: 25 Waited count: 70 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@64658a66 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 268 (NIOWorkerThread-5): State: WAITING Blocked count: 4 Waited count: 95 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4f4c61e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 269 (NIOWorkerThread-6): State: WAITING Blocked count: 7 Waited count: 96 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4f4c61e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 270 (NIOWorkerThread-7): State: WAITING Blocked count: 5 Waited count: 96 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4f4c61e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 271 (NIOWorkerThread-8): State: WAITING Blocked count: 5 Waited count: 96 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4f4c61e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 272 (NIOWorkerThread-9): State: WAITING Blocked count: 11 Waited count: 96 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4f4c61e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 273 (NIOWorkerThread-10): State: WAITING Blocked count: 4 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4f4c61e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 274 (NIOWorkerThread-11): State: WAITING Blocked count: 3 Waited count: 95 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4f4c61e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 275 (NIOWorkerThread-12): State: WAITING Blocked count: 5 Waited count: 95 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4f4c61e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 276 (NIOWorkerThread-13): State: WAITING Blocked count: 0 Waited count: 95 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4f4c61e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 277 (NIOWorkerThread-14): State: WAITING Blocked count: 6 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4f4c61e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 278 (NIOWorkerThread-15): State: WAITING Blocked count: 5 Waited count: 95 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4f4c61e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 279 (NIOWorkerThread-16): State: WAITING Blocked count: 3 Waited count: 96 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4f4c61e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 281 (RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34441): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@2d34975f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 282 (RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441): State: WAITING Blocked count: 186 Waited count: 655 Waiting on java.util.concurrent.Semaphore$NonfairSync@61daab01 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) 
java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 283 (RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441): State: WAITING Blocked count: 61 Waited count: 347 Waiting on java.util.concurrent.Semaphore$NonfairSync@3c0030b4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 284 (RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34441): State: WAITING Blocked count: 38 Waited count: 10243 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@74feabe8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 285 (RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34441): State: WAITING Blocked count: 1 Waited count: 4 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7d794447 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 286 (RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34441): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7d794447 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 287 (RpcServer.replication.FPBQ.Fifo.handler=0,queue=0,port=34441): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@49cf6a4a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 288 (RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=34441): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@74f77657 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 289 (RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=34441): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@75d722ca Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 290 (RpcServer.metaPriority.FPBQ.Fifo.handler=0,queue=0,port=34441): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.Semaphore$NonfairSync@423b0b4b Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 293 (Time-limited test.named-queue-events-pool-0): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7094b47c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 294 (MiniHBaseClusterRegionServer-EventLoopGroup-3-1): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 316 (MiniHBaseClusterRegionServer-EventLoopGroup-4-1): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 338 (MiniHBaseClusterRegionServer-EventLoopGroup-5-1): State: RUNNABLE Blocked count: 98 Waited count: 3 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 291 (M:0;be0458f36726:34441): State: TIMED_WAITING Blocked count: 12 Waited count: 4002 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hbase.regionserver.wal.SyncFuture.get(SyncFuture.java:169) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:1029) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.doSync(AbstractFSWAL.java:1940) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$sync$2(AbstractFSWAL.java:723) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$1112/0x00007f8ff8f8bd80.run(Unknown Source) app//org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:723) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:713) app//org.apache.hadoop.hbase.regionserver.HRegion.doSyncOfUnflushedWALChanges(HRegion.java:2935) app//org.apache.hadoop.hbase.regionserver.HRegion.internalPrepareFlushCache(HRegion.java:2876) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2735) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2700) app//org.apache.hadoop.hbase.regionserver.HRegion.doClose(HRegion.java:1862) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1672) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1627) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1610) Thread 361 (Monitor thread for TaskMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 52 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 363 (master/be0458f36726:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 365 (master/be0458f36726:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 367 (org.apache.hadoop.hdfs.PeerCache@3ce5434c): State: TIMED_WAITING Blocked count: 0 Waited count: 174 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 386 (master:store-WAL-Roller): State: TIMED_WAITING Blocked count: 0 Waited count: 5186 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:180) Thread 403 (MiniHBaseClusterRegionServer-EventLoopGroup-5-2): State: RUNNABLE Blocked count: 113 Waited count: 3 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 404 (MiniHBaseClusterRegionServer-EventLoopGroup-5-3): State: RUNNABLE Blocked count: 114 Waited count: 2 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 417 (Idle-Rpc-Conn-Sweeper-pool-0): State: WAITING Blocked count: 0 Waited count: 171 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3c8ad7c3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 428 (SnapshotHandlerChoreCleaner): State: TIMED_WAITING Blocked count: 0 Waited count: 52 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 416 (RpcClient-timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 51802 Stack: 
java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 437 (HMaster-EventLoopGroup-1-2): State: RUNNABLE Blocked count: 32 Waited count: 2 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 439 (HMaster-EventLoopGroup-1-3): State: RUNNABLE Blocked count: 22 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 463 (RegionServerTracker-0): State: WAITING Blocked count: 7 Waited count: 9 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1d040daf Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 486 
(regionserver/be0458f36726:0.procedureResultReporter): State: WAITING Blocked count: 13 Waited count: 21 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@310e31d0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 487 (regionserver/be0458f36726:0.procedureResultReporter): State: WAITING Blocked count: 17 Waited count: 35 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5b50dcfd Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 485 (regionserver/be0458f36726:0.procedureResultReporter): State: WAITING Blocked count: 19 Waited count: 39 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7186b93 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 522 (MiniHBaseClusterRegionServer-EventLoopGroup-4-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 532 (MiniHBaseClusterRegionServer-EventLoopGroup-4-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 533 (region-location-0): State: WAITING Blocked count: 12 Waited count: 19 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3acc77aa Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 537 (RPCClient-NioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 538 (RPCClient-NioEventLoopGroup-6-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 415 (Async-Client-Retry-Timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 51644 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 541 (RPCClient-NioEventLoopGroup-6-3): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 565 (ForkJoinPool.commonPool-worker-1): State: WAITING Blocked count: 0 Waited count: 490 Waiting on java.util.concurrent.ForkJoinPool@7a8c5192 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 581 (MiniHBaseClusterRegionServer-EventLoopGroup-3-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 594 (region-location-1): State: WAITING Blocked count: 2 Waited count: 6 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3acc77aa Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 595 (region-location-2): State: WAITING Blocked count: 2 Waited count: 6 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3acc77aa Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1001 (MutableQuantiles-0): State: TIMED_WAITING Blocked count: 0 Waited count: 877 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1057 (RPCClient-NioEventLoopGroup-6-4): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1097 (MiniHBaseClusterRegionServer-EventLoopGroup-3-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1098 (zk-permission-watcher-pool-0): State: WAITING Blocked count: 78 Waited count: 115 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@63e66e86 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1145 (RPCClient-NioEventLoopGroup-6-5): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1206 (RPCClient-NioEventLoopGroup-6-6): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1207 
(RPCClient-NioEventLoopGroup-6-7): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1208 (RPCClient-NioEventLoopGroup-6-8): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1259 (RPCClient-NioEventLoopGroup-6-9): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1260 (RPCClient-NioEventLoopGroup-6-10): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1261 (RPCClient-NioEventLoopGroup-6-11): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1263 (RPCClient-NioEventLoopGroup-6-12): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1264 (RPCClient-NioEventLoopGroup-6-13): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1621 (Container metrics unregistration): State: WAITING Blocked count: 11 Waited count: 48 Waiting on java.util.TaskQueue@3a4e2477 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 1852 (region-location-3): State: WAITING Blocked count: 2 Waited count: 7 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3acc77aa Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1853 (region-location-4): State: WAITING Blocked count: 2 Waited count: 7 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3acc77aa Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2029 (RPCClient-NioEventLoopGroup-6-14): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3464 (ForkJoinPool.commonPool-worker-5): State: WAITING Blocked count: 0 Waited count: 388 Waiting on java.util.concurrent.ForkJoinPool@7a8c5192 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 8327 (ForkJoinPool.commonPool-worker-6): State: TIMED_WAITING Blocked count: 0 Waited count: 245 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 10119 (AsyncFSWAL-1-hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/MasterData-prefix:be0458f36726,34441,1733152772730): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2256e26d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 10122 (Timer for 'JobHistoryServer' metrics system): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516)
2024-12-02T15:28:31,784 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; it might be because your Hadoop version is > 3.2.3 or 3.3.4; see HBASE-27595 for details.
2024-12-02T15:29:01,784 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; it might be because your Hadoop version is > 3.2.3 or 3.3.4; see HBASE-27595 for details.
Process Thread Dump: Automatic Stack Trace every 60 seconds waiting on M:0;be0458f36726:34441 229 active threads
Thread 1 (main): State: TIMED_WAITING Blocked count: 2 Waited count: 4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444) java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203) app//org.junit.internal.runners.statements.FailOnTimeout.getResult(FailOnTimeout.java:167) app//org.junit.internal.runners.statements.FailOnTimeout.evaluate(FailOnTimeout.java:128) app//org.apache.hadoop.hbase.SystemExitRule$1.evaluate(SystemExitRule.java:39) app//org.junit.rules.RunRules.evaluate(RunRules.java:20) app//org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) app//org.junit.runners.ParentRunner.run(ParentRunner.java:413) app//org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:316) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeWithRerun(JUnit4Provider.java:240) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:214) app//org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:155) app//org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:385) app//org.apache.maven.surefire.booter.ForkedBooter.execute(ForkedBooter.java:162) app//org.apache.maven.surefire.booter.ForkedBooter.run(ForkedBooter.java:507) app//org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:495) Thread 2 (Reference Handler): State: RUNNABLE Blocked count: 5 Waited count: 0 Stack: java.base@17.0.11/java.lang.ref.Reference.waitForReferencePendingList(Native Method) java.base@17.0.11/java.lang.ref.Reference.processPendingReferences(Reference.java:253) java.base@17.0.11/java.lang.ref.Reference$ReferenceHandler.run(Reference.java:215) Thread 3 (Finalizer): State: WAITING Blocked count: 77 Waited count: 18 Waiting on java.lang.ref.ReferenceQueue$Lock@2acf0975 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method)
java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) java.base@17.0.11/java.lang.ref.Finalizer$FinalizerThread.run(Finalizer.java:172) Thread 4 (Signal Dispatcher): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 12 (Common-Cleaner): State: TIMED_WAITING Blocked count: 19 Waited count: 24 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/jdk.internal.ref.CleanerImpl.run(CleanerImpl.java:140) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) java.base@17.0.11/jdk.internal.misc.InnocuousThread.run(InnocuousThread.java:162) Thread 13 (Notification Thread): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 14 (pool-1-thread-1): State: RUNNABLE Blocked count: 0 Waited count: 31 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.poll(EPollPort.java:200) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:281) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 15 (pool-1-thread-2): State: WAITING Blocked count: 3 Waited count: 29 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@49707025 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:275) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 16 (surefire-forkedjvm-stream-flusher): State: TIMED_WAITING Blocked count: 0 Waited count: 5875 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 18 (surefire-forkedjvm-command-thread): State: WAITING Blocked count: 0 Waited count: 59 Waiting on java.util.concurrent.CountDownLatch$Sync@28adc29b Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.CountDownLatch.await(CountDownLatch.java:230) java.base@17.0.11/sun.nio.ch.PendingFuture.get(PendingFuture.java:178) app//org.apache.maven.surefire.api.util.internal.Channels$2.read(Channels.java:127) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) app//org.apache.maven.surefire.api.util.internal.Channels$3.readImpl(Channels.java:169) app//org.apache.maven.surefire.api.util.internal.AbstractNoninterruptibleReadableChannel.read(AbstractNoninterruptibleReadableChannel.java:50) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:430) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:419) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.readMessageType(AbstractStreamDecoder.java:116) app//org.apache.maven.surefire.booter.stream.CommandDecoder.decode(CommandDecoder.java:77) app//org.apache.maven.surefire.booter.spi.CommandChannelDecoder.decode(CommandChannelDecoder.java:60) app//org.apache.maven.surefire.booter.CommandReader$CommandRunnable.run(CommandReader.java:290) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 22 (Time-limited test): State: RUNNABLE Blocked count: 13257 Waited count: 13916 Stack: java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo1(Native Method) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:197) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:154) app//org.apache.hadoop.hbase.util.ReflectionUtils.printThreadInfo(ReflectionUtils.java:181) app//org.apache.hadoop.hbase.util.Threads.printThreadInfo(Threads.java:186) app//org.apache.hadoop.hbase.util.Threads.threadDumpingIsAlive(Threads.java:113) app//org.apache.hadoop.hbase.LocalHBaseCluster.join(LocalHBaseCluster.java:396) app//org.apache.hadoop.hbase.SingleProcessHBaseCluster.waitUntilShutDown(SingleProcessHBaseCluster.java:886) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1038) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) app//org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:123) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) 
java.base@17.0.11/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) java.base@17.0.11/java.lang.reflect.Method.invoke(Method.java:568) app//org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) app//org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) app//org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) app//org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) app//org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) Thread 23 (org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner): State: WAITING Blocked count: 13 Waited count: 14 Waiting on java.lang.ref.ReferenceQueue$Lock@441c019f Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 25 (SSL Certificates Store Monitor): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.TaskQueue@e27b061 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 34 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@14f04a62): State: TIMED_WAITING Blocked count: 0 Waited count: 1170 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 35 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 117 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 36 (pool-6-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 37 (qtp342963407-37): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) 
app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f8ff842d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 38 (qtp342963407-38): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f8ff842d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 39 (qtp342963407-39): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) 
app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f8ff842d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 40 (qtp342963407-40): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f8ff842d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 41 (qtp342963407-41-acceptor-0@66a2842b-ServerConnector@33b00b71{HTTP/1.1, (http/1.1)}{localhost:45293}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 42 (qtp342963407-42): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 43 (qtp342963407-43): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 44 (qtp342963407-44): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 45 (Session-HouseKeeper-b071e18-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 46 (pool-7-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 53 (FSEditLogAsync): State: WAITING Blocked count: 34 Waited count: 3124 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@262b76c7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.dequeueEdit(FSEditLogAsync.java:241) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.run(FSEditLogAsync.java:250) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 55 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 56 (IPC Server idle connection scanner for port 36329): State: TIMED_WAITING Blocked count: 1 Waited count: 60 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 58 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 117 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 61 (org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor@2a178eda): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor.run(PendingReconstructionBlocks.java:267) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 62 (DatanodeAdminMonitor-0): State: TIMED_WAITING Blocked count: 0 Waited count: 196 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 49 (org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor@14179e6b): State: TIMED_WAITING Blocked count: 0 Waited count: 117 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor.run(HeartbeatManager.java:563) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 47 (RedundancyMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 197 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) java.base@17.0.11/java.lang.Thread.sleep(Thread.java:344) java.base@17.0.11/java.util.concurrent.TimeUnit.sleep(TimeUnit.java:446) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$RedundancyMonitor.run(BlockManager.java:5352) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 48 (MarkedDeleteBlockScrubberThread): State: TIMED_WAITING Blocked count: 0 Waited count: 57562 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$MarkedDeleteBlockScrubber.run(BlockManager.java:5326) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 51 (Block report processor): State: WAITING Blocked count: 1 Waited count: 1375 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@48708070 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.processQueue(BlockManager.java:5627) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.run(BlockManager.java:5614) Thread 57 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 54 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 64 (IPC Server handler 0 on default port 36329): State: TIMED_WAITING Blocked count: 117 Waited count: 2588 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 65 (IPC Server handler 1 on default port 36329): State: TIMED_WAITING Blocked count: 141 Waited count: 2573 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 66 (IPC Server handler 2 on default port 36329): State: TIMED_WAITING Blocked count: 118 Waited count: 2568 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 67 (IPC Server handler 3 on default port 36329): State: TIMED_WAITING Blocked count: 126 Waited count: 2573 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 68 (IPC Server handler 4 on default port 36329): State: TIMED_WAITING Blocked count: 121 Waited count: 2610 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 69 
(pool-12-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 71 (org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor@5ed1041e): State: TIMED_WAITING Blocked count: 0 Waited count: 292 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor.run(LeaseManager.java:537) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 72 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor@2129b88): State: TIMED_WAITING Blocked count: 0 Waited count: 117 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor.run(FSNamesystem.java:4550) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 73 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller@2b36a401): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller.run(FSNamesystem.java:4592) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 74 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber@68b8bdaf): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber.run(FSNamesystem.java:4689) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 75 (CacheReplicationMonitor(826874655)): State: TIMED_WAITING Blocked count: 0 Waited count: 21 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor.run(CacheReplicationMonitor.java:186) Thread 86 (pool-18-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 87 (qtp902969986-87): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f8ff842d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 88 (qtp902969986-88-acceptor-0@348693b2-ServerConnector@5703ed84{HTTP/1.1, (http/1.1)}{localhost:42033}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 89 (qtp902969986-89): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 90 (qtp902969986-90): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 91 (Session-HouseKeeper-becdb63-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 92 (nioEventLoopGroup-2-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 93 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@7efce26a): State: TIMED_WAITING Blocked count: 0 Waited count: 1167 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 95 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 96 (IPC Server idle connection scanner for port 43321): State: TIMED_WAITING Blocked count: 1 Waited count: 60 Stack: 
java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 98 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 117 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 101 (Command processor): State: WAITING Blocked count: 0 Waited count: 410 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@20fe8858 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 102 (BP-1550669498-172.17.0.2-1733152767991 heartbeating to localhost/127.0.0.1:36329): State: TIMED_WAITING Blocked count: 1412 Waited count: 1599 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 103 (pool-20-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 85 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@47bb227d): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 97 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 94 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 104 (IPC Server handler 0 on default port 43321): State: TIMED_WAITING Blocked count: 0 Waited count: 585 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 105 (IPC Server handler 1 on default port 43321): State: TIMED_WAITING Blocked count: 0 Waited count: 588 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 106 (IPC Server handler 2 on default port 43321): State: TIMED_WAITING Blocked count: 0 Waited count: 586 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 107 (IPC Server handler 3 on default port 43321): State: TIMED_WAITING Blocked count: 0 Waited count: 597 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 108 (IPC Server handler 4 on default port 43321): State: TIMED_WAITING Blocked count: 0 Waited count: 588 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 120 (pool-26-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 114 (IPC Client (834363022) connection to localhost/127.0.0.1:36329 from jenkins): State: TIMED_WAITING Blocked count: 1416 Waited count: 1417 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Thread 115 (IPC Parameter Sending Thread for localhost/127.0.0.1:36329): State: TIMED_WAITING Blocked count: 0 Waited count: 2222 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) 
java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 121 (qtp512551189-121): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f8ff842d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 122 (qtp512551189-122-acceptor-0@241d394-ServerConnector@7e7a95e6{HTTP/1.1, (http/1.1)}{localhost:46597}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 123 (qtp512551189-123): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 124 (qtp512551189-124): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 125 (Session-HouseKeeper-26171ccc-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 126 (nioEventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 127 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@4ade5188): State: TIMED_WAITING Blocked count: 0 Waited count: 1166 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 129 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 130 (IPC Server idle connection scanner for port 33749): State: TIMED_WAITING Blocked count: 1 Waited count: 60 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) 
java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 132 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 117 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 135 (Command processor): State: WAITING Blocked count: 1 Waited count: 386 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@224032a3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 136 (BP-1550669498-172.17.0.2-1733152767991 heartbeating to localhost/127.0.0.1:36329): State: TIMED_WAITING Blocked count: 1447 Waited count: 1597 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 137 (pool-29-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 119 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@3c065f15): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 131 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 128 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 138 (IPC Server handler 0 on default port 33749): State: TIMED_WAITING Blocked count: 0 Waited count: 597 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 139 (IPC Server handler 1 on default port 33749): State: TIMED_WAITING Blocked count: 0 Waited count: 584 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 140 (IPC Server handler 2 on default port 33749): State: TIMED_WAITING Blocked count: 0 Waited count: 606 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 141 (IPC Server handler 3 on default port 33749): State: TIMED_WAITING Blocked count: 0 Waited count: 612 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 142 (IPC Server handler 4 on default port 33749): State: TIMED_WAITING Blocked count: 0 Waited count: 671 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 152 (pool-36-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 153 (qtp243104512-153): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) 
app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f8ff842d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 154 (qtp243104512-154-acceptor-0@dc51bb0-ServerConnector@295c990e{HTTP/1.1, (http/1.1)}{localhost:35927}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 155 (qtp243104512-155): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 156 (qtp243104512-156): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 157 (Session-HouseKeeper-43a15b30-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 158 (nioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 159 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@6412af41): State: TIMED_WAITING Blocked count: 0 Waited count: 1166 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 161 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 162 (IPC Server idle connection scanner for port 36271): State: TIMED_WAITING Blocked count: 1 Waited count: 60 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 164 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 117 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 167 (Command processor): State: WAITING Blocked count: 3 Waited count: 380 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2e0f7dfe Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 168 (BP-1550669498-172.17.0.2-1733152767991 heartbeating to localhost/127.0.0.1:36329): State: TIMED_WAITING Blocked count: 1369 Waited count: 1583 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 169 (pool-38-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 151 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@686bb815): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 163 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 160 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 170 (IPC Server handler 0 on default port 36271): State: TIMED_WAITING Blocked count: 0 Waited count: 592 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 171 (IPC Server handler 1 on default port 36271): State: TIMED_WAITING Blocked count: 0 Waited count: 621 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 172 (IPC Server handler 2 on default port 36271): State: TIMED_WAITING Blocked count: 0 Waited count: 594 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 173 (IPC Server handler 3 on default port 36271): State: TIMED_WAITING Blocked count: 0 Waited count: 597 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 174 (IPC Server handler 4 on default port 36271): State: TIMED_WAITING Blocked count: 0 Waited count: 593 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 185 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/44e720fa-d04f-d823-6564-55fab94a4e60/cluster_91cb4ebe-02bf-547b-91b0-e80db118e0cc/data/data1)): State: TIMED_WAITING Blocked count: 24 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 186 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/44e720fa-d04f-d823-6564-55fab94a4e60/cluster_91cb4ebe-02bf-547b-91b0-e80db118e0cc/data/data2)): State: TIMED_WAITING Blocked count: 29 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 187 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/44e720fa-d04f-d823-6564-55fab94a4e60/cluster_91cb4ebe-02bf-547b-91b0-e80db118e0cc/data/data3)): State: TIMED_WAITING Blocked count: 27 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 190 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/44e720fa-d04f-d823-6564-55fab94a4e60/cluster_91cb4ebe-02bf-547b-91b0-e80db118e0cc/data/data4)): State: TIMED_WAITING Blocked count: 29 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 199 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/44e720fa-d04f-d823-6564-55fab94a4e60/cluster_91cb4ebe-02bf-547b-91b0-e80db118e0cc/data/data4/current/BP-1550669498-172.17.0.2-1733152767991): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 202 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/44e720fa-d04f-d823-6564-55fab94a4e60/cluster_91cb4ebe-02bf-547b-91b0-e80db118e0cc/data/data1/current/BP-1550669498-172.17.0.2-1733152767991): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 201 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/44e720fa-d04f-d823-6564-55fab94a4e60/cluster_91cb4ebe-02bf-547b-91b0-e80db118e0cc/data/data3/current/BP-1550669498-172.17.0.2-1733152767991): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 198 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/44e720fa-d04f-d823-6564-55fab94a4e60/cluster_91cb4ebe-02bf-547b-91b0-e80db118e0cc/data/data2/current/BP-1550669498-172.17.0.2-1733152767991): State: TIMED_WAITING Blocked count: 2 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 216 (pool-15-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 217 (pool-23-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 220 (java.util.concurrent.ThreadPoolExecutor$Worker@10cbcc63[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 223 (java.util.concurrent.ThreadPoolExecutor$Worker@7f752be8[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 224 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/44e720fa-d04f-d823-6564-55fab94a4e60/cluster_91cb4ebe-02bf-547b-91b0-e80db118e0cc/data/data5)): State: TIMED_WAITING Blocked count: 2 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 225 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/44e720fa-d04f-d823-6564-55fab94a4e60/cluster_91cb4ebe-02bf-547b-91b0-e80db118e0cc/data/data6)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 229 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/44e720fa-d04f-d823-6564-55fab94a4e60/cluster_91cb4ebe-02bf-547b-91b0-e80db118e0cc/data/data5/current/BP-1550669498-172.17.0.2-1733152767991): State: TIMED_WAITING Blocked count: 3 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 231 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/44e720fa-d04f-d823-6564-55fab94a4e60/cluster_91cb4ebe-02bf-547b-91b0-e80db118e0cc/data/data6/current/BP-1550669498-172.17.0.2-1733152767991): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 235 (pool-33-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 238 (java.util.concurrent.ThreadPoolExecutor$Worker@fcd4fa5[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 239 (FsDatasetAsyncDiskServiceFixer): State: TIMED_WAITING Blocked count: 0 Waited count: 20 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer.run(HBaseTestingUtil.java:576) Thread 241 (NIOServerCxnFactory.SelectorThread-0): State: RUNNABLE Blocked count: 2 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 242 (NIOServerCxnFactory.SelectorThread-1): State: RUNNABLE Blocked count: 4 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 243 (NIOServerCxnFactory.AcceptThread:localhost/127.0.0.1:55609): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.select(NIOServerCnxnFactory.java:205) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.run(NIOServerCnxnFactory.java:181) Thread 240 (ConnnectionExpirer): State: TIMED_WAITING Blocked count: 0 Waited count: 60 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.NIOServerCnxnFactory$ConnectionExpirerThread.run(NIOServerCnxnFactory.java:554) Thread 244 (SessionTracker): State: TIMED_WAITING Blocked count: 0 Waited count: 292 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Thread 245 (SyncThread:0): State: WAITING Blocked count: 14 Waited count: 376 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4805d854 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.SyncRequestProcessor.run(SyncRequestProcessor.java:170) Thread 246 (ProcessThread(sid:0 cport:55609):): State: WAITING Blocked count: 4 Waited count: 463 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2f293479 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.PrepRequestProcessor.run(PrepRequestProcessor.java:142) Thread 247 (RequestThrottler): State: WAITING Blocked count: 0 Waited count: 504 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7ae703f1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
app//org.apache.zookeeper.server.RequestThrottler.run(RequestThrottler.java:147) Thread 248 (NIOWorkerThread-1): State: WAITING Blocked count: 4 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4f4c61e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 259 (weak-ref-cleaner-strictcontextstorage): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.lang.ref.ReferenceQueue$Lock@62d579ac Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 260 (HBase-Metrics2-1): State: TIMED_WAITING Blocked count: 0 Waited count: 383 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 261 (HMaster-EventLoopGroup-1-1): State: RUNNABLE Blocked count: 30 Waited count: 2 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 262 (Time-limited test-SendThread(127.0.0.1:55609)): State: RUNNABLE Blocked count: 10 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289) Thread 263 (Time-limited test-EventThread): State: WAITING Blocked count: 23 Waited count: 55 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4603708e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 264 (NIOWorkerThread-2): State: WAITING Blocked count: 7 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4f4c61e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 265 (NIOWorkerThread-3): State: WAITING Blocked count: 4 Waited count: 96 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4f4c61e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 266 (NIOWorkerThread-4): State: WAITING Blocked count: 6 Waited count: 96 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4f4c61e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 267 (zk-event-processor-pool-0): State: WAITING Blocked count: 25 Waited count: 70 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@64658a66 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 268 (NIOWorkerThread-5): State: WAITING Blocked count: 4 Waited count: 96 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4f4c61e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 269 (NIOWorkerThread-6): State: WAITING Blocked count: 7 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4f4c61e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 270 (NIOWorkerThread-7): State: WAITING Blocked count: 5 Waited count: 96 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4f4c61e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 271 (NIOWorkerThread-8): State: WAITING Blocked count: 5 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4f4c61e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 272 (NIOWorkerThread-9): State: WAITING Blocked count: 11 Waited count: 96 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4f4c61e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 273 (NIOWorkerThread-10): State: WAITING Blocked count: 4 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4f4c61e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 274 (NIOWorkerThread-11): State: WAITING Blocked count: 3 Waited count: 95 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4f4c61e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 275 (NIOWorkerThread-12): State: WAITING Blocked count: 5 Waited count: 96 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4f4c61e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 276 (NIOWorkerThread-13): State: WAITING Blocked count: 0 Waited count: 96 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4f4c61e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 277 (NIOWorkerThread-14): State: WAITING Blocked count: 6 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4f4c61e 
Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 278 (NIOWorkerThread-15): State: WAITING Blocked count: 5 Waited count: 96 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4f4c61e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 279 (NIOWorkerThread-16): State: WAITING Blocked count: 3 Waited count: 96 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4f4c61e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 281 (RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34441): State: WAITING Blocked count: 
0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@2d34975f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 282 (RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441): State: WAITING Blocked count: 186 Waited count: 655 Waiting on java.util.concurrent.Semaphore$NonfairSync@61daab01 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 283 (RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441): State: WAITING Blocked count: 61 Waited count: 347 Waiting on java.util.concurrent.Semaphore$NonfairSync@3c0030b4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 284 (RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34441): State: WAITING Blocked count: 38 Waited count: 10243 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@74feabe8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 285 (RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34441): State: WAITING Blocked count: 
1 Waited count: 4 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7d794447 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 286 (RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34441): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7d794447 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 287 (RpcServer.replication.FPBQ.Fifo.handler=0,queue=0,port=34441): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@49cf6a4a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 288 (RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=34441): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@74f77657 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) 
app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 289 (RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=34441): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@75d722ca Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 290 (RpcServer.metaPriority.FPBQ.Fifo.handler=0,queue=0,port=34441): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.Semaphore$NonfairSync@423b0b4b Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 293 (Time-limited test.named-queue-events-pool-0): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7094b47c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 294 (MiniHBaseClusterRegionServer-EventLoopGroup-3-1): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 316 (MiniHBaseClusterRegionServer-EventLoopGroup-4-1): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 338 (MiniHBaseClusterRegionServer-EventLoopGroup-5-1): State: RUNNABLE Blocked count: 98 Waited count: 3 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 291 (M:0;be0458f36726:34441): State: TIMED_WAITING Blocked count: 12 Waited count: 4002 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hbase.regionserver.wal.SyncFuture.get(SyncFuture.java:169) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:1029) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.doSync(AbstractFSWAL.java:1940) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$sync$2(AbstractFSWAL.java:723) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$1112/0x00007f8ff8f8bd80.run(Unknown Source) app//org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) 
app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:723) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:713) app//org.apache.hadoop.hbase.regionserver.HRegion.doSyncOfUnflushedWALChanges(HRegion.java:2935) app//org.apache.hadoop.hbase.regionserver.HRegion.internalPrepareFlushCache(HRegion.java:2876) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2735) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2700) app//org.apache.hadoop.hbase.regionserver.HRegion.doClose(HRegion.java:1862) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1672) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1627) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1610) Thread 361 (Monitor thread for TaskMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 58 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 363 (master/be0458f36726:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 365 (master/be0458f36726:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 367 (org.apache.hadoop.hdfs.PeerCache@3ce5434c): State: TIMED_WAITING Blocked count: 0 Waited count: 194 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 386 (master:store-WAL-Roller): State: TIMED_WAITING Blocked count: 0 Waited count: 5785 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:180) Thread 403 (MiniHBaseClusterRegionServer-EventLoopGroup-5-2): State: RUNNABLE Blocked count: 113 Waited count: 3 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 404 (MiniHBaseClusterRegionServer-EventLoopGroup-5-3): State: RUNNABLE Blocked count: 114 Waited count: 2 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 417 (Idle-Rpc-Conn-Sweeper-pool-0): State: WAITING Blocked count: 0 Waited count: 171 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3c8ad7c3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 428 (SnapshotHandlerChoreCleaner): State: TIMED_WAITING Blocked count: 0 Waited count: 58 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 416 (RpcClient-timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 57804 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 437 (HMaster-EventLoopGroup-1-2): State: RUNNABLE Blocked count: 32 Waited count: 2 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 439 (HMaster-EventLoopGroup-1-3): State: RUNNABLE Blocked count: 22 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 463 (RegionServerTracker-0): State: WAITING Blocked count: 7 Waited count: 9 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1d040daf Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 486 (regionserver/be0458f36726:0.procedureResultReporter): State: WAITING Blocked count: 13 Waited count: 21 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@310e31d0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 487 (regionserver/be0458f36726:0.procedureResultReporter): State: WAITING Blocked count: 17 Waited count: 35 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5b50dcfd Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 485 
(regionserver/be0458f36726:0.procedureResultReporter): State: WAITING Blocked count: 19 Waited count: 39 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7186b93 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 522 (MiniHBaseClusterRegionServer-EventLoopGroup-4-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 532 (MiniHBaseClusterRegionServer-EventLoopGroup-4-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 533 (region-location-0): State: WAITING Blocked count: 12 Waited count: 19 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3acc77aa Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 537 (RPCClient-NioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 538 (RPCClient-NioEventLoopGroup-6-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 415 (Async-Client-Retry-Timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 57647 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 541 (RPCClient-NioEventLoopGroup-6-3): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 565 (ForkJoinPool.commonPool-worker-1): State: TIMED_WAITING Blocked count: 0 Waited count: 491 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 581 (MiniHBaseClusterRegionServer-EventLoopGroup-3-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 594 (region-location-1): State: WAITING Blocked count: 2 Waited count: 6 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3acc77aa Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 595 (region-location-2): State: WAITING Blocked count: 2 Waited count: 6 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3acc77aa Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1001 (MutableQuantiles-0): State: TIMED_WAITING Blocked count: 0 Waited count: 883 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1057 (RPCClient-NioEventLoopGroup-6-4): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1097 (MiniHBaseClusterRegionServer-EventLoopGroup-3-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1098 (zk-permission-watcher-pool-0): State: WAITING Blocked count: 78 Waited count: 115 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@63e66e86 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1145 (RPCClient-NioEventLoopGroup-6-5): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1206 (RPCClient-NioEventLoopGroup-6-6): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1207 (RPCClient-NioEventLoopGroup-6-7): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1208 (RPCClient-NioEventLoopGroup-6-8): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1259 (RPCClient-NioEventLoopGroup-6-9): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1260 (RPCClient-NioEventLoopGroup-6-10): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1261 (RPCClient-NioEventLoopGroup-6-11): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1263 (RPCClient-NioEventLoopGroup-6-12): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1264 (RPCClient-NioEventLoopGroup-6-13): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1621 (Container metrics unregistration): State: WAITING Blocked count: 11 Waited count: 48 Waiting on java.util.TaskQueue@3a4e2477 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 1852 (region-location-3): State: WAITING Blocked count: 2 Waited count: 7 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3acc77aa Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1853 (region-location-4): State: WAITING Blocked count: 2 Waited count: 7 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3acc77aa Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2029 (RPCClient-NioEventLoopGroup-6-14): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3464 (ForkJoinPool.commonPool-worker-5): State: WAITING Blocked count: 0 Waited count: 388 Waiting on java.util.concurrent.ForkJoinPool@7a8c5192 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 10119 (AsyncFSWAL-1-hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/MasterData-prefix:be0458f36726,34441,1733152772730): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2256e26d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 10122 (Timer for 'JobHistoryServer' metrics system): State: TIMED_WAITING Blocked count: 0 Waited count: 17 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 10123 (process reaper): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 2024-12-02T15:29:31,784 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-12-02T15:29:33,993 DEBUG [master/be0458f36726:0:becomeActiveMaster-MemStoreChunkPool Statistics {}] regionserver.ChunkCreator$MemStoreChunkPool$StatisticsThread(417): data stats (chunk size=2097152): current pool size=7, created chunk count=10, reused chunk count=24, reuseRatio=70.59% 2024-12-02T15:29:33,993 DEBUG [master/be0458f36726:0:becomeActiveMaster-MemStoreChunkPool Statistics {}] regionserver.ChunkCreator$MemStoreChunkPool$StatisticsThread(417): index stats (chunk size=209715): current pool size=0, created chunk count=0, reused chunk count=0, reuseRatio=0 2024-12-02T15:29:41,785 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-02T15:30:01,785 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. Process Thread Dump: Automatic Stack Trace every 60 seconds waiting on M:0;be0458f36726:34441 228 active threads Thread 1 (main): State: TIMED_WAITING Blocked count: 2 Waited count: 4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444) java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203) app//org.junit.internal.runners.statements.FailOnTimeout.getResult(FailOnTimeout.java:167) app//org.junit.internal.runners.statements.FailOnTimeout.evaluate(FailOnTimeout.java:128) app//org.apache.hadoop.hbase.SystemExitRule$1.evaluate(SystemExitRule.java:39) app//org.junit.rules.RunRules.evaluate(RunRules.java:20) app//org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) app//org.junit.runners.ParentRunner.run(ParentRunner.java:413) app//org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:316) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeWithRerun(JUnit4Provider.java:240) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:214) app//org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:155) app//org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:385) app//org.apache.maven.surefire.booter.ForkedBooter.execute(ForkedBooter.java:162) app//org.apache.maven.surefire.booter.ForkedBooter.run(ForkedBooter.java:507) app//org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:495) Thread 2 (Reference Handler): State: RUNNABLE Blocked count: 5 Waited count: 0 Stack: java.base@17.0.11/java.lang.ref.Reference.waitForReferencePendingList(Native Method) java.base@17.0.11/java.lang.ref.Reference.processPendingReferences(Reference.java:253) java.base@17.0.11/java.lang.ref.Reference$ReferenceHandler.run(Reference.java:215) Thread 3 (Finalizer): State: WAITING Blocked count: 77 Waited count: 18 Waiting on java.lang.ref.ReferenceQueue$Lock@2acf0975 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) java.base@17.0.11/java.lang.ref.Finalizer$FinalizerThread.run(Finalizer.java:172) Thread 4 (Signal Dispatcher): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 12 (Common-Cleaner): State: TIMED_WAITING Blocked 
count: 19 Waited count: 25 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/jdk.internal.ref.CleanerImpl.run(CleanerImpl.java:140) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) java.base@17.0.11/jdk.internal.misc.InnocuousThread.run(InnocuousThread.java:162) Thread 13 (Notification Thread): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 14 (pool-1-thread-1): State: RUNNABLE Blocked count: 0 Waited count: 34 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.poll(EPollPort.java:200) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:281) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 15 (pool-1-thread-2): State: WAITING Blocked count: 3 Waited count: 32 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@49707025 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:275) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 16 (surefire-forkedjvm-stream-flusher): State: TIMED_WAITING Blocked count: 0 Waited count: 6475 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 18 (surefire-forkedjvm-command-thread): State: WAITING Blocked count: 0 Waited count: 65 Waiting on 
java.util.concurrent.CountDownLatch$Sync@41e51e83 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.CountDownLatch.await(CountDownLatch.java:230) java.base@17.0.11/sun.nio.ch.PendingFuture.get(PendingFuture.java:178) app//org.apache.maven.surefire.api.util.internal.Channels$2.read(Channels.java:127) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) app//org.apache.maven.surefire.api.util.internal.Channels$3.readImpl(Channels.java:169) app//org.apache.maven.surefire.api.util.internal.AbstractNoninterruptibleReadableChannel.read(AbstractNoninterruptibleReadableChannel.java:50) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:430) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:419) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.readMessageType(AbstractStreamDecoder.java:116) app//org.apache.maven.surefire.booter.stream.CommandDecoder.decode(CommandDecoder.java:77) app//org.apache.maven.surefire.booter.spi.CommandChannelDecoder.decode(CommandChannelDecoder.java:60) app//org.apache.maven.surefire.booter.CommandReader$CommandRunnable.run(CommandReader.java:290) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 22 (Time-limited test): State: RUNNABLE Blocked count: 13257 Waited count: 13917 Stack: java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo1(Native Method) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:197) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:154) app//org.apache.hadoop.hbase.util.ReflectionUtils.printThreadInfo(ReflectionUtils.java:181) app//org.apache.hadoop.hbase.util.Threads.printThreadInfo(Threads.java:186) app//org.apache.hadoop.hbase.util.Threads.threadDumpingIsAlive(Threads.java:113) app//org.apache.hadoop.hbase.LocalHBaseCluster.join(LocalHBaseCluster.java:396) app//org.apache.hadoop.hbase.SingleProcessHBaseCluster.waitUntilShutDown(SingleProcessHBaseCluster.java:886) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1038) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) app//org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:123) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) java.base@17.0.11/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) java.base@17.0.11/java.lang.reflect.Method.invoke(Method.java:568) app//org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) app//org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) app//org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) 
app//org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) app//org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) Thread 23 (org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner): State: WAITING Blocked count: 13 Waited count: 14 Waiting on java.lang.ref.ReferenceQueue$Lock@441c019f Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 25 (SSL Certificates Store Monitor): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.TaskQueue@e27b061 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 34 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@14f04a62): State: TIMED_WAITING Blocked count: 0 Waited count: 1290 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 35 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 129 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 36 (pool-6-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 37 (qtp342963407-37): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) 
app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f8ff842d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 38 (qtp342963407-38): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f8ff842d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 39 (qtp342963407-39): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f8ff842d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 40 (qtp342963407-40): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: 
java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f8ff842d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 41 (qtp342963407-41-acceptor-0@66a2842b-ServerConnector@33b00b71{HTTP/1.1, (http/1.1)}{localhost:45293}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 42 (qtp342963407-42): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 43 (qtp342963407-43): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 44 (qtp342963407-44): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 45 (Session-HouseKeeper-b071e18-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 46 (pool-7-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 53 (FSEditLogAsync): State: WAITING Blocked count: 34 Waited count: 3124 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@262b76c7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.dequeueEdit(FSEditLogAsync.java:241) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.run(FSEditLogAsync.java:250) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 55 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 56 (IPC Server idle connection scanner for port 36329): State: TIMED_WAITING Blocked count: 1 Waited count: 66 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 58 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 129 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 61 (org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor@2a178eda): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor.run(PendingReconstructionBlocks.java:267) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 62 (DatanodeAdminMonitor-0): State: TIMED_WAITING Blocked count: 0 Waited count: 216 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 49 (org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor@14179e6b): State: TIMED_WAITING Blocked count: 0 Waited count: 129 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor.run(HeartbeatManager.java:563) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 47 (RedundancyMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 217 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) java.base@17.0.11/java.lang.Thread.sleep(Thread.java:344) java.base@17.0.11/java.util.concurrent.TimeUnit.sleep(TimeUnit.java:446) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$RedundancyMonitor.run(BlockManager.java:5352) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 48 (MarkedDeleteBlockScrubberThread): State: TIMED_WAITING Blocked count: 0 Waited count: 63485 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$MarkedDeleteBlockScrubber.run(BlockManager.java:5326) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 51 (Block report processor): State: WAITING Blocked count: 1 Waited count: 1375 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@48708070 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.processQueue(BlockManager.java:5627) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.run(BlockManager.java:5614) Thread 57 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 54 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 64 (IPC Server handler 0 on default port 36329): State: TIMED_WAITING Blocked count: 117 Waited count: 2649 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 65 (IPC Server handler 1 on default port 36329): State: TIMED_WAITING Blocked count: 141 Waited count: 2633 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 66 (IPC Server handler 2 on default port 36329): State: TIMED_WAITING Blocked count: 118 Waited count: 2628 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 67 (IPC Server handler 3 on default port 36329): State: TIMED_WAITING Blocked count: 126 Waited count: 2633 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 68 (IPC Server handler 4 on default port 36329): State: TIMED_WAITING Blocked count: 121 Waited count: 2671 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 69 (pool-12-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 71 (org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor@5ed1041e): State: TIMED_WAITING Blocked count: 0 Waited count: 322 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor.run(LeaseManager.java:537) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 72 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor@2129b88): State: TIMED_WAITING Blocked count: 0 Waited count: 129 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor.run(FSNamesystem.java:4550) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 73 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller@2b36a401): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller.run(FSNamesystem.java:4592) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 74 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber@68b8bdaf): State: TIMED_WAITING Blocked count: 0 Waited count: 4 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber.run(FSNamesystem.java:4689) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 75 (CacheReplicationMonitor(826874655)): State: TIMED_WAITING Blocked count: 0 Waited count: 23 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor.run(CacheReplicationMonitor.java:186) Thread 86 (pool-18-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 87 (qtp902969986-87): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: 
java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f8ff842d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 88 (qtp902969986-88-acceptor-0@348693b2-ServerConnector@5703ed84{HTTP/1.1, (http/1.1)}{localhost:42033}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 89 (qtp902969986-89): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 90 (qtp902969986-90): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 91 (Session-HouseKeeper-becdb63-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 92 (nioEventLoopGroup-2-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 93 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@7efce26a): State: TIMED_WAITING Blocked count: 0 Waited count: 1287 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 95 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 96 (IPC Server idle connection scanner for port 43321): State: TIMED_WAITING Blocked count: 1 Waited count: 66 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 98 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 129 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 101 (Command processor): State: WAITING Blocked count: 0 Waited count: 430 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@20fe8858 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 102 (BP-1550669498-172.17.0.2-1733152767991 heartbeating to localhost/127.0.0.1:36329): State: TIMED_WAITING Blocked count: 1432 Waited count: 1639 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 103 (pool-20-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 85 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@47bb227d): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: 
java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 97 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 94 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 104 (IPC Server handler 0 on default port 43321): State: TIMED_WAITING Blocked count: 0 Waited count: 645 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 105 (IPC Server handler 1 on default port 43321): State: TIMED_WAITING Blocked count: 0 Waited count: 648 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 106 (IPC Server handler 2 on default port 43321): State: TIMED_WAITING Blocked count: 0 Waited count: 646 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 107 (IPC Server handler 3 on default port 43321): State: TIMED_WAITING Blocked count: 0 
Waited count: 657 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 108 (IPC Server handler 4 on default port 43321): State: TIMED_WAITING Blocked count: 0 Waited count: 648 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 120 (pool-26-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 114 (IPC Client (834363022) connection to localhost/127.0.0.1:36329 from jenkins): State: TIMED_WAITING Blocked count: 1476 Waited count: 1477 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Thread 115 (IPC Parameter Sending Thread for localhost/127.0.0.1:36329): State: TIMED_WAITING Blocked count: 0 Waited count: 2282 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 121 (qtp512551189-121): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) 
app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f8ff842d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 122 (qtp512551189-122-acceptor-0@241d394-ServerConnector@7e7a95e6{HTTP/1.1, (http/1.1)}{localhost:46597}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 123 (qtp512551189-123): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 124 (qtp512551189-124): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 125 (Session-HouseKeeper-26171ccc-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 126 (nioEventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 127 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@4ade5188): State: TIMED_WAITING Blocked count: 0 Waited count: 1286 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 129 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 130 (IPC Server idle connection scanner for port 33749): State: TIMED_WAITING Blocked count: 1 Waited count: 66 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 132 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 129 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 135 (Command processor): State: WAITING Blocked count: 1 Waited count: 406 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@224032a3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 136 (BP-1550669498-172.17.0.2-1733152767991 heartbeating to localhost/127.0.0.1:36329): State: TIMED_WAITING Blocked count: 1467 Waited count: 1637 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 137 (pool-29-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 119 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@3c065f15): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) 
app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 131 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 128 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 138 (IPC Server handler 0 on default port 33749): State: TIMED_WAITING Blocked count: 0 Waited count: 657 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 139 (IPC Server handler 1 on default port 33749): State: TIMED_WAITING Blocked count: 0 Waited count: 644 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 140 (IPC Server handler 2 on default port 33749): State: TIMED_WAITING Blocked count: 0 Waited count: 700 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 141 (IPC Server handler 3 on default port 33749): State: TIMED_WAITING Blocked count: 0 Waited count: 693 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 142 (IPC Server handler 4 on default port 33749): State: TIMED_WAITING Blocked count: 0 Waited count: 731 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 152 (pool-36-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 153 (qtp243104512-153): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f8ff842d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 154 (qtp243104512-154-acceptor-0@dc51bb0-ServerConnector@295c990e{HTTP/1.1, (http/1.1)}{localhost:35927}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) 
java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 155 (qtp243104512-155): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 156 (qtp243104512-156): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 157 (Session-HouseKeeper-43a15b30-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 158 (nioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) 
app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 159 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@6412af41): State: TIMED_WAITING Blocked count: 0 Waited count: 1286 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 161 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 162 (IPC Server idle connection scanner for port 36271): State: TIMED_WAITING Blocked count: 1 Waited count: 66 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 164 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 129 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 167 (Command processor): State: WAITING Blocked count: 3 Waited count: 400 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2e0f7dfe Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
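Thread 159 (org.apache.hadoop.util.JvmPauseMonitor$Monitor) spends its time in Thread.sleep, hence the TIMED_WAITING state and a waited count in the thousands. The usual technique behind such a monitor is to sleep for a short fixed interval and compare the requested sleep against the observed elapsed time; a large overshoot suggests the whole JVM was paused, for example by GC. A rough sketch of that idea, with invented names and thresholds rather than Hadoop's actual code:

// Rough sketch of a pause-detection loop: sleep briefly, then compare the
// wall-clock elapsed time against the requested sleep. The names and the
// 500 ms / 1 s numbers here are illustrative only.
class PauseDetector implements Runnable {
    private static final long SLEEP_MS = 500;
    private static final long WARN_THRESHOLD_MS = 1000;

    @Override
    public void run() {
        while (!Thread.currentThread().isInterrupted()) {
            long start = System.nanoTime();
            try {
                Thread.sleep(SLEEP_MS);
            } catch (InterruptedException e) {
                return;
            }
            long elapsedMs = (System.nanoTime() - start) / 1_000_000;
            long extra = elapsedMs - SLEEP_MS;
            if (extra > WARN_THRESHOLD_MS) {
                System.err.println("Detected pause of approximately " + extra + " ms");
            }
        }
    }
}
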
app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 168 (BP-1550669498-172.17.0.2-1733152767991 heartbeating to localhost/127.0.0.1:36329): State: TIMED_WAITING Blocked count: 1389 Waited count: 1623 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 169 (pool-38-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 151 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@686bb815): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 163 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 160 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 170 (IPC Server handler 0 on default port 36271): State: TIMED_WAITING 
Blocked count: 0 Waited count: 652 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 171 (IPC Server handler 1 on default port 36271): State: TIMED_WAITING Blocked count: 0 Waited count: 681 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 172 (IPC Server handler 2 on default port 36271): State: TIMED_WAITING Blocked count: 0 Waited count: 654 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 173 (IPC Server handler 3 on default port 36271): State: TIMED_WAITING Blocked count: 0 Waited count: 657 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 174 (IPC Server handler 4 on default port 36271): State: TIMED_WAITING Blocked count: 0 Waited count: 653 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 185 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/44e720fa-d04f-d823-6564-55fab94a4e60/cluster_91cb4ebe-02bf-547b-91b0-e80db118e0cc/data/data1)): State: TIMED_WAITING Blocked count: 24 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) 
app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 186 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/44e720fa-d04f-d823-6564-55fab94a4e60/cluster_91cb4ebe-02bf-547b-91b0-e80db118e0cc/data/data2)): State: TIMED_WAITING Blocked count: 29 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 187 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/44e720fa-d04f-d823-6564-55fab94a4e60/cluster_91cb4ebe-02bf-547b-91b0-e80db118e0cc/data/data3)): State: TIMED_WAITING Blocked count: 27 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 190 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/44e720fa-d04f-d823-6564-55fab94a4e60/cluster_91cb4ebe-02bf-547b-91b0-e80db118e0cc/data/data4)): State: TIMED_WAITING Blocked count: 29 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 199 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/44e720fa-d04f-d823-6564-55fab94a4e60/cluster_91cb4ebe-02bf-547b-91b0-e80db118e0cc/data/data4/current/BP-1550669498-172.17.0.2-1733152767991): State: TIMED_WAITING Blocked count: 3 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 202 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/44e720fa-d04f-d823-6564-55fab94a4e60/cluster_91cb4ebe-02bf-547b-91b0-e80db118e0cc/data/data1/current/BP-1550669498-172.17.0.2-1733152767991): State: TIMED_WAITING Blocked count: 1 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 201 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/44e720fa-d04f-d823-6564-55fab94a4e60/cluster_91cb4ebe-02bf-547b-91b0-e80db118e0cc/data/data3/current/BP-1550669498-172.17.0.2-1733152767991): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 198 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/44e720fa-d04f-d823-6564-55fab94a4e60/cluster_91cb4ebe-02bf-547b-91b0-e80db118e0cc/data/data2/current/BP-1550669498-172.17.0.2-1733152767991): State: TIMED_WAITING Blocked count: 2 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 216 (pool-15-thread-1): State: WAITING Blocked count: 0 
Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@57a60d6d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 217 (pool-23-thread-1): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@67d99e54 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 220 (java.util.concurrent.ThreadPoolExecutor$Worker@10cbcc63[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 223 (java.util.concurrent.ThreadPoolExecutor$Worker@7f752be8[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 224 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/44e720fa-d04f-d823-6564-55fab94a4e60/cluster_91cb4ebe-02bf-547b-91b0-e80db118e0cc/data/data5)): State: TIMED_WAITING Blocked count: 2 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 225 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/44e720fa-d04f-d823-6564-55fab94a4e60/cluster_91cb4ebe-02bf-547b-91b0-e80db118e0cc/data/data6)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 229 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/44e720fa-d04f-d823-6564-55fab94a4e60/cluster_91cb4ebe-02bf-547b-91b0-e80db118e0cc/data/data5/current/BP-1550669498-172.17.0.2-1733152767991): State: TIMED_WAITING Blocked count: 3 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 231 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/44e720fa-d04f-d823-6564-55fab94a4e60/cluster_91cb4ebe-02bf-547b-91b0-e80db118e0cc/data/data6/current/BP-1550669498-172.17.0.2-1733152767991): State: TIMED_WAITING Blocked count: 4 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 235 (pool-33-thread-1): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5d510944 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
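Several entries here and above (pool-15-thread-1, pool-23-thread-1, pool-33-thread-1, Hadoop-Metrics-Updater-0, and the java.util.concurrent.ThreadPoolExecutor$Worker entries) share the same idle stack: a ScheduledThreadPoolExecutor worker parked in DelayedWorkQueue.take, waiting for the next scheduled task to become due. A small self-contained example that produces this kind of idle worker; the task and period are arbitrary:

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

public class ScheduledWorkerDemo {
    public static void main(String[] args) {
        // A single-threaded scheduled executor; between runs its worker thread
        // sits in DelayedWorkQueue.take, i.e. WAITING/TIMED_WAITING in a dump.
        ScheduledExecutorService ses = Executors.newSingleThreadScheduledExecutor();
        ses.scheduleAtFixedRate(
            () -> System.out.println("periodic task ran"),
            0, 10, TimeUnit.SECONDS);
        // Deliberately not shut down here, so the idle worker stays visible
        // if you take a thread dump of this process.
    }
}
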
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 238 (java.util.concurrent.ThreadPoolExecutor$Worker@fcd4fa5[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 239 (FsDatasetAsyncDiskServiceFixer): State: TIMED_WAITING Blocked count: 0 Waited count: 22 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer.run(HBaseTestingUtil.java:576) Thread 241 (NIOServerCxnFactory.SelectorThread-0): State: RUNNABLE Blocked count: 2 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 242 (NIOServerCxnFactory.SelectorThread-1): State: RUNNABLE Blocked count: 4 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) 
app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 243 (NIOServerCxnFactory.AcceptThread:localhost/127.0.0.1:55609): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.select(NIOServerCnxnFactory.java:205) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.run(NIOServerCnxnFactory.java:181) Thread 240 (ConnnectionExpirer): State: TIMED_WAITING Blocked count: 0 Waited count: 66 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.NIOServerCnxnFactory$ConnectionExpirerThread.run(NIOServerCnxnFactory.java:554) Thread 244 (SessionTracker): State: TIMED_WAITING Blocked count: 0 Waited count: 322 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Thread 245 (SyncThread:0): State: WAITING Blocked count: 14 Waited count: 380 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4805d854 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.SyncRequestProcessor.run(SyncRequestProcessor.java:170) Thread 246 (ProcessThread(sid:0 cport:55609):): State: WAITING Blocked count: 4 Waited count: 467 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2f293479 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.PrepRequestProcessor.run(PrepRequestProcessor.java:142) Thread 247 (RequestThrottler): State: WAITING Blocked count: 0 Waited count: 508 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7ae703f1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
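The selector and acceptor threads in this dump (the IPC Socket Reader, the Jetty ManagedSelector, the Netty event loops, and the ZooKeeper NIOServerCxnFactory selector and accept threads just above) all report State: RUNNABLE even when idle: a thread blocked in sun.nio.ch.EPoll.wait is inside a native call, and the JVM reports threads in native calls as RUNNABLE. A bare-bones selector loop with the same shape; the port and the empty dispatch are placeholders:

import java.io.IOException;
import java.net.InetSocketAddress;
import java.nio.channels.SelectionKey;
import java.nio.channels.Selector;
import java.nio.channels.ServerSocketChannel;

public class SelectorLoopDemo {
    public static void main(String[] args) throws IOException {
        Selector selector = Selector.open();
        ServerSocketChannel server = ServerSocketChannel.open();
        server.bind(new InetSocketAddress(0)); // ephemeral port, placeholder
        server.configureBlocking(false);
        server.register(selector, SelectionKey.OP_ACCEPT);

        while (true) {
            // With no ready channels this blocks in EPoll.wait on Linux,
            // yet a thread dump still reports the thread as RUNNABLE.
            selector.select();
            selector.selectedKeys().clear(); // real code would dispatch the keys here
        }
    }
}
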
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.RequestThrottler.run(RequestThrottler.java:147) Thread 248 (NIOWorkerThread-1): State: WAITING Blocked count: 4 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4f4c61e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 259 (weak-ref-cleaner-strictcontextstorage): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.lang.ref.ReferenceQueue$Lock@62d579ac Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 260 (HBase-Metrics2-1): State: TIMED_WAITING Blocked count: 0 Waited count: 415 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 261 (HMaster-EventLoopGroup-1-1): State: RUNNABLE Blocked count: 30 Waited count: 2 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 262 (Time-limited test-SendThread(127.0.0.1:55609)): State: RUNNABLE Blocked count: 10 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289) Thread 263 (Time-limited test-EventThread): State: WAITING Blocked count: 23 Waited count: 55 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4603708e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 264 (NIOWorkerThread-2): State: WAITING Blocked count: 7 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4f4c61e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 265 
(NIOWorkerThread-3): State: WAITING Blocked count: 4 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4f4c61e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 266 (NIOWorkerThread-4): State: WAITING Blocked count: 6 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4f4c61e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 267 (zk-event-processor-pool-0): State: WAITING Blocked count: 25 Waited count: 70 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@64658a66 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 268 (NIOWorkerThread-5): State: WAITING Blocked count: 4 Waited count: 96 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4f4c61e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 269 (NIOWorkerThread-6): State: WAITING Blocked count: 7 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4f4c61e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 270 (NIOWorkerThread-7): State: WAITING Blocked count: 5 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4f4c61e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 271 (NIOWorkerThread-8): State: WAITING Blocked count: 5 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4f4c61e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 272 (NIOWorkerThread-9): State: WAITING Blocked count: 11 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4f4c61e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 273 (NIOWorkerThread-10): State: WAITING Blocked count: 4 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4f4c61e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 274 (NIOWorkerThread-11): State: WAITING Blocked count: 3 Waited count: 96 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4f4c61e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 275 (NIOWorkerThread-12): State: WAITING Blocked count: 5 Waited count: 96 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4f4c61e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 276 (NIOWorkerThread-13): State: WAITING Blocked count: 0 Waited count: 96 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4f4c61e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 277 (NIOWorkerThread-14): State: WAITING Blocked count: 6 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4f4c61e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 278 (NIOWorkerThread-15): State: WAITING Blocked count: 5 Waited count: 96 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4f4c61e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 279 (NIOWorkerThread-16): State: WAITING Blocked count: 3 Waited count: 97 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4f4c61e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 281 (RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34441): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@2d34975f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 282 (RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441): State: WAITING Blocked count: 186 Waited count: 655 Waiting on java.util.concurrent.Semaphore$NonfairSync@61daab01 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 283 (RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441): State: WAITING Blocked count: 61 Waited count: 347 Waiting on java.util.concurrent.Semaphore$NonfairSync@3c0030b4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 284 (RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34441): State: WAITING Blocked count: 38 Waited count: 10243 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@74feabe8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 285 (RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34441): State: WAITING Blocked count: 1 Waited count: 4 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7d794447 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 286 (RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34441): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7d794447 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 287 (RpcServer.replication.FPBQ.Fifo.handler=0,queue=0,port=34441): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@49cf6a4a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) 
app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 288 (RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=34441): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@74f77657 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 289 (RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=34441): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@75d722ca Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 290 (RpcServer.metaPriority.FPBQ.Fifo.handler=0,queue=0,port=34441): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.Semaphore$NonfairSync@423b0b4b Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 293 (Time-limited test.named-queue-events-pool-0): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7094b47c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) 
app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 294 (MiniHBaseClusterRegionServer-EventLoopGroup-3-1): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 316 (MiniHBaseClusterRegionServer-EventLoopGroup-4-1): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 338 (MiniHBaseClusterRegionServer-EventLoopGroup-5-1): State: RUNNABLE Blocked count: 98 Waited count: 3 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 291 (M:0;be0458f36726:34441): State: TIMED_WAITING Blocked count: 12 Waited count: 4002 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hbase.regionserver.wal.SyncFuture.get(SyncFuture.java:169) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:1029) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.doSync(AbstractFSWAL.java:1940) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$sync$2(AbstractFSWAL.java:723) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$1112/0x00007f8ff8f8bd80.run(Unknown Source) app//org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:723) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:713) app//org.apache.hadoop.hbase.regionserver.HRegion.doSyncOfUnflushedWALChanges(HRegion.java:2935) app//org.apache.hadoop.hbase.regionserver.HRegion.internalPrepareFlushCache(HRegion.java:2876) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2735) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2700) app//org.apache.hadoop.hbase.regionserver.HRegion.doClose(HRegion.java:1862) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1672) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1627) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1610) Thread 361 (Monitor thread for TaskMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 64 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 363 (master/be0458f36726:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 365 (master/be0458f36726:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 367 (org.apache.hadoop.hdfs.PeerCache@3ce5434c): State: TIMED_WAITING Blocked count: 0 Waited count: 214 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 386 (master:store-WAL-Roller): State: TIMED_WAITING Blocked count: 0 Waited count: 6384 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:180) Thread 403 (MiniHBaseClusterRegionServer-EventLoopGroup-5-2): State: RUNNABLE Blocked count: 113 Waited count: 3 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 404 (MiniHBaseClusterRegionServer-EventLoopGroup-5-3): State: RUNNABLE Blocked count: 114 Waited count: 2 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 417 (Idle-Rpc-Conn-Sweeper-pool-0): State: WAITING Blocked count: 0 Waited count: 171 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3c8ad7c3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 428 (SnapshotHandlerChoreCleaner): State: TIMED_WAITING Blocked count: 0 Waited count: 64 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 416 (RpcClient-timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 63807 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 437 (HMaster-EventLoopGroup-1-2): State: RUNNABLE Blocked count: 32 Waited count: 2 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 439 (HMaster-EventLoopGroup-1-3): State: RUNNABLE Blocked count: 22 Waited count: 0 
Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 463 (RegionServerTracker-0): State: WAITING Blocked count: 7 Waited count: 9 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1d040daf Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 486 (regionserver/be0458f36726:0.procedureResultReporter): State: WAITING Blocked count: 13 Waited count: 21 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@310e31d0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 487 (regionserver/be0458f36726:0.procedureResultReporter): State: WAITING Blocked count: 17 Waited count: 35 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5b50dcfd Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 485 (regionserver/be0458f36726:0.procedureResultReporter): State: WAITING Blocked count: 19 Waited count: 39 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7186b93 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 522 (MiniHBaseClusterRegionServer-EventLoopGroup-4-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 532 (MiniHBaseClusterRegionServer-EventLoopGroup-4-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 533 (region-location-0): State: WAITING Blocked count: 12 Waited count: 19 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3acc77aa Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 537 (RPCClient-NioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 538 (RPCClient-NioEventLoopGroup-6-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 415 (Async-Client-Retry-Timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 63649 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 541 (RPCClient-NioEventLoopGroup-6-3): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 581 (MiniHBaseClusterRegionServer-EventLoopGroup-3-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 594 (region-location-1): State: WAITING Blocked count: 2 Waited count: 6 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3acc77aa Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 595 (region-location-2): State: WAITING Blocked count: 2 Waited count: 6 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3acc77aa Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1001 (MutableQuantiles-0): State: TIMED_WAITING Blocked count: 0 Waited count: 889 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1057 (RPCClient-NioEventLoopGroup-6-4): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1097 (MiniHBaseClusterRegionServer-EventLoopGroup-3-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1098 (zk-permission-watcher-pool-0): State: WAITING Blocked count: 78 Waited count: 115 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@63e66e86 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1145 (RPCClient-NioEventLoopGroup-6-5): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1206 (RPCClient-NioEventLoopGroup-6-6): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1207 (RPCClient-NioEventLoopGroup-6-7): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1208 (RPCClient-NioEventLoopGroup-6-8): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1259 (RPCClient-NioEventLoopGroup-6-9): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1260 (RPCClient-NioEventLoopGroup-6-10): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1261 (RPCClient-NioEventLoopGroup-6-11): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1263 (RPCClient-NioEventLoopGroup-6-12): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1264 (RPCClient-NioEventLoopGroup-6-13): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1621 (Container metrics unregistration): State: WAITING Blocked count: 11 Waited count: 48 Waiting on java.util.TaskQueue@3a4e2477 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 1852 (region-location-3): State: WAITING Blocked count: 2 Waited count: 7 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3acc77aa Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1853 (region-location-4): State: WAITING Blocked count: 2 Waited count: 7 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3acc77aa Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2029 (RPCClient-NioEventLoopGroup-6-14): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3464 (ForkJoinPool.commonPool-worker-5): State: TIMED_WAITING Blocked count: 0 Waited count: 389 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 10119 (AsyncFSWAL-1-hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/MasterData-prefix:be0458f36726,34441,1733152772730): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2256e26d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 10123 (process reaper): State: TIMED_WAITING Blocked count: 0 Waited count: 5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 10128 (Timer for 'JobHistoryServer' metrics system): State: TIMED_WAITING Blocked count: 0 Waited count: 4 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) 2024-12-02T15:30:31,785 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-02T15:31:01,785 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-12-02T15:31:13,459 DEBUG [M:0;be0458f36726:34441 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733153173445Disabling compacts and flushes for region at 1733153173445Disabling writes for close at 1733153173456 (+11 ms)Obtaining lock to block concurrent updates at 1733153173456Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733153173456Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=1003907, getHeapSize=1203984, getOffHeapSize=0, getCellsCount=2638 at 1733153173457 (+1 ms)Failed flush master:store,,1.1595e783b53d99cd5eef43b6debb2682., putting online again at 1733153473459 (+300002 ms) 2024-12-02T15:31:13,459 WARN [M:0;be0458f36726:34441 {}] region.MasterRegion(134): Failed to close region org.apache.hadoop.hbase.regionserver.wal.WALSyncTimeoutIOException: org.apache.hadoop.hbase.exceptions.TimeoutIOException: Failed to get sync result after 300000 ms for txid=4534, WAL system stuck? at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:1033) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.doSync(AbstractFSWAL.java:1940) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$sync$2(AbstractFSWAL.java:723) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:723) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:713) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.doSyncOfUnflushedWALChanges(HRegion.java:2935) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalPrepareFlushCache(HRegion.java:2876) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2735) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2700) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.doClose(HRegion.java:1862) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1672) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1627) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1610) ~[classes/:?] at org.apache.hadoop.hbase.master.region.MasterRegion.closeRegion(MasterRegion.java:132) ~[classes/:?] at org.apache.hadoop.hbase.master.region.MasterRegion.close(MasterRegion.java:205) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.stopServiceThreads(HMaster.java:1819) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.run(HMaster.java:631) ~[classes/:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.exceptions.TimeoutIOException: Failed to get sync result after 300000 ms for txid=4534, WAL system stuck? at org.apache.hadoop.hbase.regionserver.wal.SyncFuture.get(SyncFuture.java:171) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:1029) ~[classes/:?] ... 
19 more 2024-12-02T15:31:13,464 WARN [Close-WAL-Writer-0 {}] wal.AsyncProtobufLogWriter(165): normal close failed, try recover java.lang.NullPointerException: Cannot invoke "org.apache.hbase.thirdparty.io.netty.buffer.ByteBuf.ensureWritable(int)" because "this.buf" is null at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.writeInt(FanOutOneBlockAsyncDFSOutput.java:391) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.lambda$writeWALTrailerAndMagic$3(AsyncProtobufLogWriter.java:247) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.writeWALMetadata(AsyncProtobufLogWriter.java:203) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.writeWALTrailerAndMagic(AsyncProtobufLogWriter.java:240) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractProtobufLogWriter.writeWALTrailer(AbstractProtobufLogWriter.java:252) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.close(AsyncProtobufLogWriter.java:162) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2041) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T15:31:13,467 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(47): Initialize RecoverLeaseFSUtils 2024-12-02T15:31:13,467 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(59): set recoverLeaseMethod to org.apache.hadoop.fs.LeaseRecoverable.recoverLease() 2024-12-02T15:31:13,467 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file /user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/MasterData/WALs/be0458f36726,34441,1733152772730/be0458f36726%2C34441%2C1733152772730.1733152774373 2024-12-02T15:31:13,470 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=0 on file=/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/MasterData/WALs/be0458f36726,34441,1733152772730/be0458f36726%2C34441%2C1733152772730.1733152774373 after 0ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.recoverAndClose(FanOutOneBlockAsyncDFSOutput.java:605) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.close(AsyncProtobufLogWriter.java:166) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2041) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T15:31:13,470 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.InterruptedIOException: Operation cancelled at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.checkIfCancelled(RecoverLeaseFSUtils.java:269) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:159) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.recoverAndClose(FanOutOneBlockAsyncDFSOutput.java:605) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.close(AsyncProtobufLogWriter.java:166) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2041) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-02T15:31:13,470 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/MasterData/WALs/be0458f36726,34441,1733152772730/be0458f36726%2C34441%2C1733152772730.1733152774373 2024-12-02T15:31:13,470 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=0 on file=hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/MasterData/WALs/be0458f36726,34441,1733152772730/be0458f36726%2C34441%2C1733152772730.1733152774373 after 0ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Process Thread Dump: Automatic Stack Trace every 60 seconds waiting on M:0;be0458f36726:34441 229 active threads Thread 1 (main): State: TIMED_WAITING Blocked count: 2 Waited count: 4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444) java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203) app//org.junit.internal.runners.statements.FailOnTimeout.getResult(FailOnTimeout.java:167) app//org.junit.internal.runners.statements.FailOnTimeout.evaluate(FailOnTimeout.java:128) app//org.apache.hadoop.hbase.SystemExitRule$1.evaluate(SystemExitRule.java:39) app//org.junit.rules.RunRules.evaluate(RunRules.java:20) app//org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) app//org.junit.runners.ParentRunner.run(ParentRunner.java:413) app//org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:316) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeWithRerun(JUnit4Provider.java:240) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:214) app//org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:155) app//org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:385) app//org.apache.maven.surefire.booter.ForkedBooter.execute(ForkedBooter.java:162) app//org.apache.maven.surefire.booter.ForkedBooter.run(ForkedBooter.java:507) app//org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:495) Thread 2 (Reference Handler): State: RUNNABLE Blocked count: 5 Waited count: 0 Stack: java.base@17.0.11/java.lang.ref.Reference.waitForReferencePendingList(Native Method) java.base@17.0.11/java.lang.ref.Reference.processPendingReferences(Reference.java:253) java.base@17.0.11/java.lang.ref.Reference$ReferenceHandler.run(Reference.java:215) Thread 3 (Finalizer): State: WAITING 
Blocked count: 77 Waited count: 18 Waiting on java.lang.ref.ReferenceQueue$Lock@2acf0975 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) java.base@17.0.11/java.lang.ref.Finalizer$FinalizerThread.run(Finalizer.java:172) Thread 4 (Signal Dispatcher): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 12 (Common-Cleaner): State: TIMED_WAITING Blocked count: 19 Waited count: 26 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/jdk.internal.ref.CleanerImpl.run(CleanerImpl.java:140) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) java.base@17.0.11/jdk.internal.misc.InnocuousThread.run(InnocuousThread.java:162) Thread 13 (Notification Thread): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 14 (pool-1-thread-1): State: RUNNABLE Blocked count: 0 Waited count: 37 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.poll(EPollPort.java:200) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:281) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 15 (pool-1-thread-2): State: WAITING Blocked count: 3 Waited count: 35 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@49707025 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:275) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 16 (surefire-forkedjvm-stream-flusher): State: TIMED_WAITING Blocked count: 0 Waited count: 7074 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 18 (surefire-forkedjvm-command-thread): State: WAITING Blocked count: 0 Waited count: 71 Waiting on java.util.concurrent.CountDownLatch$Sync@3dd37e6d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.CountDownLatch.await(CountDownLatch.java:230) java.base@17.0.11/sun.nio.ch.PendingFuture.get(PendingFuture.java:178) app//org.apache.maven.surefire.api.util.internal.Channels$2.read(Channels.java:127) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) app//org.apache.maven.surefire.api.util.internal.Channels$3.readImpl(Channels.java:169) app//org.apache.maven.surefire.api.util.internal.AbstractNoninterruptibleReadableChannel.read(AbstractNoninterruptibleReadableChannel.java:50) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:430) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:419) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.readMessageType(AbstractStreamDecoder.java:116) app//org.apache.maven.surefire.booter.stream.CommandDecoder.decode(CommandDecoder.java:77) app//org.apache.maven.surefire.booter.spi.CommandChannelDecoder.decode(CommandChannelDecoder.java:60) app//org.apache.maven.surefire.booter.CommandReader$CommandRunnable.run(CommandReader.java:290) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 22 (Time-limited test): State: RUNNABLE Blocked count: 13257 Waited count: 13918 Stack: java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo1(Native Method) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:197) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:154) app//org.apache.hadoop.hbase.util.ReflectionUtils.printThreadInfo(ReflectionUtils.java:181) app//org.apache.hadoop.hbase.util.Threads.printThreadInfo(Threads.java:186) app//org.apache.hadoop.hbase.util.Threads.threadDumpingIsAlive(Threads.java:113) app//org.apache.hadoop.hbase.LocalHBaseCluster.join(LocalHBaseCluster.java:396) app//org.apache.hadoop.hbase.SingleProcessHBaseCluster.waitUntilShutDown(SingleProcessHBaseCluster.java:886) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1038) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) app//org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:123) 
java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) java.base@17.0.11/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) java.base@17.0.11/java.lang.reflect.Method.invoke(Method.java:568) app//org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) app//org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) app//org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) app//org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) app//org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) Thread 23 (org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner): State: WAITING Blocked count: 13 Waited count: 14 Waiting on java.lang.ref.ReferenceQueue$Lock@441c019f Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 25 (SSL Certificates Store Monitor): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.TaskQueue@e27b061 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 34 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@14f04a62): State: TIMED_WAITING Blocked count: 0 Waited count: 1410 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 35 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 141 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 36 (pool-6-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 37 (qtp342963407-37): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f8ff842d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 38 (qtp342963407-38): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f8ff842d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 39 (qtp342963407-39): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) 
app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f8ff842d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 40 (qtp342963407-40): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f8ff842d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 41 (qtp342963407-41-acceptor-0@66a2842b-ServerConnector@33b00b71{HTTP/1.1, (http/1.1)}{localhost:45293}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 42 (qtp342963407-42): State: TIMED_WAITING Blocked count: 0 Waited count: 12 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 
43 (qtp342963407-43): State: TIMED_WAITING Blocked count: 0 Waited count: 12 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 44 (qtp342963407-44): State: TIMED_WAITING Blocked count: 0 Waited count: 12 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 45 (Session-HouseKeeper-b071e18-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 46 (pool-7-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 53 (FSEditLogAsync): State: WAITING Blocked count: 34 Waited count: 3124 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@262b76c7 
Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.dequeueEdit(FSEditLogAsync.java:241) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.run(FSEditLogAsync.java:250) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 55 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 56 (IPC Server idle connection scanner for port 36329): State: TIMED_WAITING Blocked count: 1 Waited count: 72 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 58 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 141 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 61 (org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor@2a178eda): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor.run(PendingReconstructionBlocks.java:267) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 62 (DatanodeAdminMonitor-0): State: TIMED_WAITING Blocked count: 0 Waited count: 236 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 49 (org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor@14179e6b): State: TIMED_WAITING Blocked count: 0 Waited count: 141 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor.run(HeartbeatManager.java:563) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 47 (RedundancyMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 237 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) java.base@17.0.11/java.lang.Thread.sleep(Thread.java:344) java.base@17.0.11/java.util.concurrent.TimeUnit.sleep(TimeUnit.java:446) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$RedundancyMonitor.run(BlockManager.java:5352) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 48 (MarkedDeleteBlockScrubberThread): State: TIMED_WAITING Blocked count: 0 Waited count: 69408 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$MarkedDeleteBlockScrubber.run(BlockManager.java:5326) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 51 (Block report processor): State: WAITING Blocked count: 1 Waited count: 1375 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@48708070 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.processQueue(BlockManager.java:5627) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.run(BlockManager.java:5614) Thread 57 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 54 (IPC 
Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 64 (IPC Server handler 0 on default port 36329): State: TIMED_WAITING Blocked count: 117 Waited count: 2710 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 65 (IPC Server handler 1 on default port 36329): State: TIMED_WAITING Blocked count: 141 Waited count: 2694 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 66 (IPC Server handler 2 on default port 36329): State: TIMED_WAITING Blocked count: 118 Waited count: 2688 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 67 (IPC Server handler 3 on default port 36329): State: TIMED_WAITING Blocked count: 129 Waited count: 2695 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 68 (IPC Server handler 4 on default port 36329): State: TIMED_WAITING Blocked count: 122 Waited count: 2733 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) 
app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 69 (pool-12-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 71 (org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor@5ed1041e): State: TIMED_WAITING Blocked count: 0 Waited count: 352 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor.run(LeaseManager.java:537) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 72 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor@2129b88): State: TIMED_WAITING Blocked count: 0 Waited count: 141 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor.run(FSNamesystem.java:4550) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 73 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller@2b36a401): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller.run(FSNamesystem.java:4592) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 74 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber@68b8bdaf): State: TIMED_WAITING Blocked count: 0 Waited count: 4 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber.run(FSNamesystem.java:4689) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 75 (CacheReplicationMonitor(826874655)): State: TIMED_WAITING Blocked count: 0 Waited count: 25 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor.run(CacheReplicationMonitor.java:186) Thread 86 (pool-18-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 87 (qtp902969986-87): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f8ff842d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 88 (qtp902969986-88-acceptor-0@348693b2-ServerConnector@5703ed84{HTTP/1.1, (http/1.1)}{localhost:42033}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 89 (qtp902969986-89): State: TIMED_WAITING Blocked count: 0 Waited count: 12 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 90 
(qtp902969986-90): State: TIMED_WAITING Blocked count: 0 Waited count: 12 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 91 (Session-HouseKeeper-becdb63-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 92 (nioEventLoopGroup-2-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 93 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@7efce26a): State: TIMED_WAITING Blocked count: 0 Waited count: 1407 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 95 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 96 (IPC Server idle connection scanner 
for port 43321): State: TIMED_WAITING Blocked count: 1 Waited count: 72 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 98 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 141 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 101 (Command processor): State: WAITING Blocked count: 0 Waited count: 450 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@20fe8858 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 102 (BP-1550669498-172.17.0.2-1733152767991 heartbeating to localhost/127.0.0.1:36329): State: TIMED_WAITING Blocked count: 1452 Waited count: 1680 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 103 (pool-20-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 85 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@47bb227d): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 97 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 94 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 104 (IPC Server handler 0 on default port 43321): State: TIMED_WAITING Blocked count: 0 Waited count: 705 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 105 (IPC Server handler 1 on default port 43321): State: TIMED_WAITING Blocked count: 0 Waited count: 708 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 106 (IPC Server handler 2 on default port 43321): State: TIMED_WAITING Blocked count: 0 Waited count: 706 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 107 (IPC Server handler 3 on default port 43321): State: TIMED_WAITING Blocked count: 0 Waited count: 717 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 108 (IPC Server handler 4 on default port 43321): State: TIMED_WAITING Blocked count: 0 Waited count: 708 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 120 (pool-26-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 114 (IPC Client (834363022) connection to localhost/127.0.0.1:36329 from jenkins): State: TIMED_WAITING Blocked count: 1525 Waited count: 1526 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Thread 115 (IPC Parameter Sending Thread for localhost/127.0.0.1:36329): State: TIMED_WAITING Blocked count: 0 Waited count: 2338 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) 
java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 121 (qtp512551189-121): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f8ff842d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 122 (qtp512551189-122-acceptor-0@241d394-ServerConnector@7e7a95e6{HTTP/1.1, (http/1.1)}{localhost:46597}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 123 (qtp512551189-123): State: TIMED_WAITING Blocked count: 0 Waited count: 12 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 124 (qtp512551189-124): State: TIMED_WAITING Blocked count: 0 Waited count: 12 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 125 (Session-HouseKeeper-26171ccc-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 126 (nioEventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 127 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@4ade5188): State: TIMED_WAITING Blocked count: 0 Waited count: 1406 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 129 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 130 (IPC Server idle connection scanner for port 33749): State: TIMED_WAITING Blocked count: 1 Waited count: 72 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) 
java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 132 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 141 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 135 (Command processor): State: WAITING Blocked count: 1 Waited count: 426 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@224032a3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 136 (BP-1550669498-172.17.0.2-1733152767991 heartbeating to localhost/127.0.0.1:36329): State: TIMED_WAITING Blocked count: 1487 Waited count: 1680 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 137 (pool-29-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 119 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@3c065f15): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 131 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 128 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 138 (IPC Server handler 0 on default port 33749): State: TIMED_WAITING Blocked count: 0 Waited count: 717 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 139 (IPC Server handler 1 on default port 33749): State: TIMED_WAITING Blocked count: 0 Waited count: 704 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 140 (IPC Server handler 2 on default port 33749): State: TIMED_WAITING Blocked count: 0 Waited count: 767 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 141 (IPC Server handler 3 on default port 33749): State: TIMED_WAITING Blocked count: 0 Waited count: 755 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 142 (IPC Server handler 4 on default port 33749): State: TIMED_WAITING Blocked count: 0 Waited count: 791 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 152 (pool-36-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 153 (qtp243104512-153): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) 
app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f8ff842d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 154 (qtp243104512-154-acceptor-0@dc51bb0-ServerConnector@295c990e{HTTP/1.1, (http/1.1)}{localhost:35927}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 155 (qtp243104512-155): State: TIMED_WAITING Blocked count: 0 Waited count: 12 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 156 (qtp243104512-156): State: TIMED_WAITING Blocked count: 0 Waited count: 12 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 157 (Session-HouseKeeper-43a15b30-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 158 (nioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 159 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@6412af41): State: TIMED_WAITING Blocked count: 0 Waited count: 1406 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 161 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 162 (IPC Server idle connection scanner for port 36271): State: TIMED_WAITING Blocked count: 1 Waited count: 72 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 164 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 141 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 167 (Command processor): State: WAITING Blocked count: 3 Waited count: 420 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2e0f7dfe Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 168 (BP-1550669498-172.17.0.2-1733152767991 heartbeating to localhost/127.0.0.1:36329): State: TIMED_WAITING Blocked count: 1409 Waited count: 1663 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 169 (pool-38-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 151 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@686bb815): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 163 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 160 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 170 (IPC Server handler 0 on default port 36271): State: TIMED_WAITING Blocked count: 0 Waited count: 726 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 171 (IPC Server handler 1 on default port 36271): State: TIMED_WAITING Blocked count: 0 Waited count: 768 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 172 (IPC Server handler 2 on default port 36271): State: TIMED_WAITING Blocked count: 0 Waited count: 714 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 173 (IPC Server handler 3 on default port 36271): State: TIMED_WAITING Blocked count: 0 Waited count: 717 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 174 (IPC Server handler 4 on default port 36271): State: TIMED_WAITING Blocked count: 0 Waited count: 713 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 185 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/44e720fa-d04f-d823-6564-55fab94a4e60/cluster_91cb4ebe-02bf-547b-91b0-e80db118e0cc/data/data1)): State: TIMED_WAITING Blocked count: 24 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 186 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/44e720fa-d04f-d823-6564-55fab94a4e60/cluster_91cb4ebe-02bf-547b-91b0-e80db118e0cc/data/data2)): State: TIMED_WAITING Blocked count: 29 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 187 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/44e720fa-d04f-d823-6564-55fab94a4e60/cluster_91cb4ebe-02bf-547b-91b0-e80db118e0cc/data/data3)): State: TIMED_WAITING Blocked count: 27 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 190 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/44e720fa-d04f-d823-6564-55fab94a4e60/cluster_91cb4ebe-02bf-547b-91b0-e80db118e0cc/data/data4)): State: TIMED_WAITING Blocked count: 29 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 199 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/44e720fa-d04f-d823-6564-55fab94a4e60/cluster_91cb4ebe-02bf-547b-91b0-e80db118e0cc/data/data4/current/BP-1550669498-172.17.0.2-1733152767991): State: TIMED_WAITING Blocked count: 3 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 202 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/44e720fa-d04f-d823-6564-55fab94a4e60/cluster_91cb4ebe-02bf-547b-91b0-e80db118e0cc/data/data1/current/BP-1550669498-172.17.0.2-1733152767991): State: TIMED_WAITING Blocked count: 1 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 201 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/44e720fa-d04f-d823-6564-55fab94a4e60/cluster_91cb4ebe-02bf-547b-91b0-e80db118e0cc/data/data3/current/BP-1550669498-172.17.0.2-1733152767991): State: TIMED_WAITING Blocked count: 1 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 198 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/44e720fa-d04f-d823-6564-55fab94a4e60/cluster_91cb4ebe-02bf-547b-91b0-e80db118e0cc/data/data2/current/BP-1550669498-172.17.0.2-1733152767991): State: TIMED_WAITING Blocked count: 2 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 216 (pool-15-thread-1): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@57a60d6d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 217 (pool-23-thread-1): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@67d99e54 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 220 (java.util.concurrent.ThreadPoolExecutor$Worker@10cbcc63[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited 
count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 223 (java.util.concurrent.ThreadPoolExecutor$Worker@7f752be8[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 224 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/44e720fa-d04f-d823-6564-55fab94a4e60/cluster_91cb4ebe-02bf-547b-91b0-e80db118e0cc/data/data5)): State: TIMED_WAITING Blocked count: 2 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 225 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/44e720fa-d04f-d823-6564-55fab94a4e60/cluster_91cb4ebe-02bf-547b-91b0-e80db118e0cc/data/data6)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 229 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/44e720fa-d04f-d823-6564-55fab94a4e60/cluster_91cb4ebe-02bf-547b-91b0-e80db118e0cc/data/data5/current/BP-1550669498-172.17.0.2-1733152767991): State: TIMED_WAITING Blocked count: 3 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 231 
(refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/44e720fa-d04f-d823-6564-55fab94a4e60/cluster_91cb4ebe-02bf-547b-91b0-e80db118e0cc/data/data6/current/BP-1550669498-172.17.0.2-1733152767991): State: TIMED_WAITING Blocked count: 4 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 235 (pool-33-thread-1): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5d510944 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 238 (java.util.concurrent.ThreadPoolExecutor$Worker@fcd4fa5[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 239 (FsDatasetAsyncDiskServiceFixer): State: TIMED_WAITING Blocked count: 0 Waited count: 24 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer.run(HBaseTestingUtil.java:576) Thread 241 (NIOServerCxnFactory.SelectorThread-0): State: RUNNABLE Blocked count: 2 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403)
    app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368)
Thread 242 (NIOServerCxnFactory.SelectorThread-1):
  State: RUNNABLE
  Blocked count: 4
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403)
    app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368)
Thread 243 (NIOServerCxnFactory.AcceptThread:localhost/127.0.0.1:55609):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.select(NIOServerCnxnFactory.java:205)
    app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.run(NIOServerCnxnFactory.java:181)
Thread 240 (ConnnectionExpirer):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 72
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.zookeeper.server.NIOServerCnxnFactory$ConnectionExpirerThread.run(NIOServerCnxnFactory.java:554)
Thread 244 (SessionTracker):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 352
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163)
Thread 245 (SyncThread:0):
  State: WAITING
  Blocked count: 14
  Waited count: 385
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4805d854
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    app//org.apache.zookeeper.server.SyncRequestProcessor.run(SyncRequestProcessor.java:170)
Thread 246 (ProcessThread(sid:0 cport:55609):):
  State: WAITING
  Blocked count: 4
  Waited count: 472
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2f293479
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    app//org.apache.zookeeper.server.PrepRequestProcessor.run(PrepRequestProcessor.java:142)
Thread 247 (RequestThrottler):
  State: WAITING
  Blocked count: 0
  Waited count: 513
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7ae703f1
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    app//org.apache.zookeeper.server.RequestThrottler.run(RequestThrottler.java:147)
Thread 248 (NIOWorkerThread-1):
  State: WAITING
  Blocked count: 4
  Waited count: 98
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4f4c61e
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 259 (weak-ref-cleaner-strictcontextstorage):
  State: WAITING
  Blocked count: 0
  Waited count: 1
  Waiting on java.lang.ref.ReferenceQueue$Lock@62d579ac
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155)
    java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176)
    app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 260 (HBase-Metrics2-1):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 443
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 261 (HMaster-EventLoopGroup-1-1):
  State: RUNNABLE
  Blocked count: 30
  Waited count: 2
  Stack:
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 262 (Time-limited test-SendThread(127.0.0.1:55609)):
  State: RUNNABLE
  Blocked count: 10
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141)
    app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332)
    app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289)
Thread 263 (Time-limited test-EventThread):
  State: WAITING
  Blocked count: 23
  Waited count: 55
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4603708e
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550)
Thread 264 (NIOWorkerThread-2):
  State: WAITING
  Blocked count: 7
  Waited count: 98
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4f4c61e
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 265 (NIOWorkerThread-3):
  State: WAITING
  Blocked count: 4
  Waited count: 97
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4f4c61e
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 266 (NIOWorkerThread-4):
  State: WAITING
  Blocked count: 6
  Waited count: 97
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4f4c61e
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 267 (zk-event-processor-pool-0):
  State: WAITING
  Blocked count: 25
  Waited count: 70
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@64658a66
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 268 (NIOWorkerThread-5):
  State: WAITING
  Blocked count: 4
  Waited count: 97
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4f4c61e
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 269 (NIOWorkerThread-6):
  State: WAITING
  Blocked count: 7
  Waited count: 98
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4f4c61e
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 270 (NIOWorkerThread-7):
  State: WAITING
  Blocked count: 5
  Waited count: 97
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4f4c61e
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 271 (NIOWorkerThread-8):
  State: WAITING
  Blocked count: 5
  Waited count: 98
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4f4c61e
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 272 (NIOWorkerThread-9):
  State: WAITING
  Blocked count: 11
  Waited count: 97
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4f4c61e
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 273 (NIOWorkerThread-10):
  State: WAITING
  Blocked count: 4
  Waited count: 98
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4f4c61e
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 274 (NIOWorkerThread-11):
  State: WAITING
  Blocked count: 3
  Waited count: 97
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4f4c61e
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 275 (NIOWorkerThread-12):
  State: WAITING
  Blocked count: 5
  Waited count: 97
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4f4c61e
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 276 (NIOWorkerThread-13):
  State: WAITING
  Blocked count: 0
  Waited count: 97
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4f4c61e
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 277 (NIOWorkerThread-14):
  State: WAITING
  Blocked count: 6
  Waited count: 99
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4f4c61e
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 278 (NIOWorkerThread-15):
  State: WAITING
  Blocked count: 5
  Waited count: 97
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4f4c61e
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 279 (NIOWorkerThread-16):
  State: WAITING
  Blocked count: 3
  Waited count: 98
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4f4c61e
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 281 (RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=34441):
  State: WAITING
  Blocked count: 0
  Waited count: 1
  Waiting on java.util.concurrent.Semaphore$NonfairSync@2d34975f
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047)
    java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318)
    app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55)
    app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82)
Thread 282 (RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=34441):
  State: WAITING
  Blocked count: 186
  Waited count: 655
  Waiting on java.util.concurrent.Semaphore$NonfairSync@61daab01
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047)
    java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318)
    app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55)
    app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82)
Thread 283 (RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34441):
  State: WAITING
  Blocked count: 61
  Waited count: 347
  Waiting on java.util.concurrent.Semaphore$NonfairSync@3c0030b4
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047)
    java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318)
    app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55)
    app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82)
Thread 284 (RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34441):
  State: WAITING
  Blocked count: 38
  Waited count: 10243
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@74feabe8
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68)
    app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82)
Thread 285 (RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=34441):
  State: WAITING
  Blocked count: 1
  Waited count: 4
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7d794447
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68)
    app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82)
Thread 286 (RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34441):
  State: WAITING
  Blocked count: 0
  Waited count: 1
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7d794447
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68)
    app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82)
Thread 287 (RpcServer.replication.FPBQ.Fifo.handler=0,queue=0,port=34441):
  State: WAITING
  Blocked count: 0
  Waited count: 1
  Waiting on java.util.concurrent.Semaphore$NonfairSync@49cf6a4a
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047)
    java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318)
    app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55)
    app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82)
Thread 288 (RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=34441):
  State: WAITING
  Blocked count: 0
  Waited count: 1
  Waiting on java.util.concurrent.Semaphore$NonfairSync@74f77657
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047)
    java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318)
    app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55)
    app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82)
Thread 289 (RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=34441):
  State: WAITING
  Blocked count: 0
  Waited count: 1
  Waiting on java.util.concurrent.Semaphore$NonfairSync@75d722ca
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047)
    java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318)
    app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55)
    app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82)
Thread 290 (RpcServer.metaPriority.FPBQ.Fifo.handler=0,queue=0,port=34441):
  State: WAITING
  Blocked count: 0
  Waited count: 3
  Waiting on java.util.concurrent.Semaphore$NonfairSync@423b0b4b
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047)
    java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318)
    app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55)
    app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82)
Thread 293 (Time-limited test.named-queue-events-pool-0):
  State: WAITING
  Blocked count: 0
  Waited count: 1
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7094b47c
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47)
    app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56)
    app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159)
    app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 294 (MiniHBaseClusterRegionServer-EventLoopGroup-3-1):
  State: RUNNABLE
  Blocked count: 1
  Waited count: 0
  Stack:
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 316 (MiniHBaseClusterRegionServer-EventLoopGroup-4-1):
  State: RUNNABLE
  Blocked count: 1
  Waited count: 0
  Stack:
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 338 (MiniHBaseClusterRegionServer-EventLoopGroup-5-1):
  State: RUNNABLE
  Blocked count: 98
  Waited count: 3
  Stack:
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 291 (M:0;be0458f36726:34441):
  State: TIMED_WAITING
  Blocked count: 12
  Waited count: 4003
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444)
    java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203)
    app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.shutdown(AbstractFSWAL.java:1195)
    app//org.apache.hadoop.hbase.wal.AbstractFSWALProvider.shutdown0(AbstractFSWALProvider.java:162)
    app//org.apache.hadoop.hbase.wal.AbstractWALProvider$$Lambda$1448/0x00007f8ff92274c0.run(Unknown Source)
    app//org.apache.hadoop.hbase.wal.AbstractWALProvider.cleanup(AbstractWALProvider.java:287)
    app//org.apache.hadoop.hbase.wal.AbstractWALProvider.shutdown(AbstractWALProvider.java:299)
    app//org.apache.hadoop.hbase.wal.WALFactory.shutdown(WALFactory.java:341)
    app//org.apache.hadoop.hbase.master.region.MasterRegion.shutdownWAL(MasterRegion.java:140)
    app//org.apache.hadoop.hbase.master.region.MasterRegion.close(MasterRegion.java:206)
    app//org.apache.hadoop.hbase.master.HMaster.stopServiceThreads(HMaster.java:1819)
    app//org.apache.hadoop.hbase.master.HMaster.run(HMaster.java:631)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 361 (Monitor thread for TaskMonitor):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 70
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 363 (master/be0458f36726:0:becomeActiveMaster-MemStoreChunkPool Statistics):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 3
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 365 (master/be0458f36726:0:becomeActiveMaster-MemStoreChunkPool Statistics):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 3
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 367 (org.apache.hadoop.hdfs.PeerCache@3ce5434c):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 234
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253)
    app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46)
    app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 386 (master:store-WAL-Roller):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 6984
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:180)
Thread 403 (MiniHBaseClusterRegionServer-EventLoopGroup-5-2):
  State: RUNNABLE
  Blocked count: 113
  Waited count: 3
  Stack:
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 404 (MiniHBaseClusterRegionServer-EventLoopGroup-5-3):
  State: RUNNABLE
  Blocked count: 114
  Waited count: 2
  Stack:
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 417 (Idle-Rpc-Conn-Sweeper-pool-0):
  State: WAITING
  Blocked count: 0
  Waited count: 171
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3c8ad7c3
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 428 (SnapshotHandlerChoreCleaner):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 70
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 416 (RpcClient-timer-pool-0):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 69809
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598)
    app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 437 (HMaster-EventLoopGroup-1-2):
  State: RUNNABLE
  Blocked count: 32
  Waited count: 2
  Stack:
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 439 (HMaster-EventLoopGroup-1-3):
  State: RUNNABLE
  Blocked count: 22
  Waited count: 0
  Stack:
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 463 (RegionServerTracker-0):
  State: WAITING
  Blocked count: 7
  Waited count: 9
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1d040daf
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 486 (regionserver/be0458f36726:0.procedureResultReporter):
  State: WAITING
  Blocked count: 13
  Waited count: 21
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@310e31d0
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75)
Thread 487 (regionserver/be0458f36726:0.procedureResultReporter):
  State: WAITING
  Blocked count: 17
  Waited count: 35
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5b50dcfd
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75)
Thread 485 (regionserver/be0458f36726:0.procedureResultReporter):
  State: WAITING
  Blocked count: 19
  Waited count: 39
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7186b93
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75)
Thread 522 (MiniHBaseClusterRegionServer-EventLoopGroup-4-2):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 532 (MiniHBaseClusterRegionServer-EventLoopGroup-4-3):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 533 (region-location-0):
  State: WAITING
  Blocked count: 12
  Waited count: 19
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3acc77aa
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 537 (RPCClient-NioEventLoopGroup-6-1):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 538 (RPCClient-NioEventLoopGroup-6-2):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 415 (Async-Client-Retry-Timer-pool-0):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 69651
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598)
    app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 541 (RPCClient-NioEventLoopGroup-6-3):
  State: RUNNABLE
  Blocked count: 1
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 581 (MiniHBaseClusterRegionServer-EventLoopGroup-3-2):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 594 (region-location-1):
  State: WAITING
  Blocked count: 2
  Waited count: 6
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3acc77aa
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 595 (region-location-2):
  State: WAITING
  Blocked count: 2
  Waited count: 6
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3acc77aa
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 1001 (MutableQuantiles-0):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 895
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 1057 (RPCClient-NioEventLoopGroup-6-4):
  State: RUNNABLE
  Blocked count: 1
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
    app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 1097 (MiniHBaseClusterRegionServer-EventLoopGroup-3-3):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 1098 (zk-permission-watcher-pool-0):
  State: WAITING
  Blocked count: 78
  Waited count: 115
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@63e66e86
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 1145 (RPCClient-NioEventLoopGroup-6-5):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1206 (RPCClient-NioEventLoopGroup-6-6): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1207 (RPCClient-NioEventLoopGroup-6-7): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1208 (RPCClient-NioEventLoopGroup-6-8): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1259 (RPCClient-NioEventLoopGroup-6-9): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1260 (RPCClient-NioEventLoopGroup-6-10): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1261 (RPCClient-NioEventLoopGroup-6-11): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1263 (RPCClient-NioEventLoopGroup-6-12): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1264 (RPCClient-NioEventLoopGroup-6-13): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1621 (Container metrics unregistration): State: WAITING Blocked count: 11 Waited count: 48 Waiting on java.util.TaskQueue@3a4e2477 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 1852 (region-location-3): State: WAITING Blocked count: 2 Waited count: 7 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3acc77aa Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1853 (region-location-4): State: WAITING Blocked count: 2 Waited count: 7 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3acc77aa Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2029 (RPCClient-NioEventLoopGroup-6-14): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 10119 
(AsyncFSWAL-1-hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/MasterData-prefix:be0458f36726,34441,1733152772730): State: WAITING Blocked count: 0 Waited count: 2 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2256e26d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 10123 (process reaper): State: TIMED_WAITING Blocked count: 0 Waited count: 6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 10128 (Timer for 'JobHistoryServer' metrics system): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 10131 (WAL-Shutdown-0): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.awaitTermination(ThreadPoolExecutor.java:1464) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.doShutdown(AbstractFSWAL.java:2117) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$2.call(AbstractFSWAL.java:1179) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$2.call(AbstractFSWAL.java:1174) java.base@17.0.11/java.util.concurrent.FutureTask.run(FutureTask.java:264) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 10132 (Close-WAL-Writer-0): State: 
TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:166) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$1436/0x00007f8ff9220000.run(Unknown Source) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 2024-12-02T15:31:17,471 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=1 on file=hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/MasterData/WALs/be0458f36726,34441,1733152772730/be0458f36726%2C34441%2C1733152772730.1733152774373 after 4001ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-02T15:31:18,464 ERROR [WAL-Shutdown-0 {}] wal.AbstractFSWAL(2118): We have waited 5 seconds but the close of async writer doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.async.wait.on.shutdown.seconds" 2024-12-02T15:31:18,464 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-02T15:31:18,465 INFO [M:0;be0458f36726:34441 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-12-02T15:31:18,465 INFO [M:0;be0458f36726:34441 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:34441 2024-12-02T15:31:18,466 INFO [M:0;be0458f36726:34441 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-02T15:31:18,475 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:36329/user/jenkins/test-data/bc2fa6fc-f566-7dd3-66cd-44578fc4e657/MasterData/WALs/be0458f36726,34441,1733152772730/be0458f36726%2C34441%2C1733152772730.1733152774373 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
12 more 2024-12-02T15:31:18,575 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34441-0x10197ce54a90000, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-02T15:31:18,575 INFO [M:0;be0458f36726:34441 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-02T15:31:18,575 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34441-0x10197ce54a90000, quorum=127.0.0.1:55609, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-02T15:31:18,583 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@77f27ab{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-02T15:31:18,584 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@295c990e{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-02T15:31:18,584 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-02T15:31:18,584 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6632e866{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-02T15:31:18,584 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@26bad6ee{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/44e720fa-d04f-d823-6564-55fab94a4e60/hadoop.log.dir/,STOPPED} 2024-12-02T15:31:18,586 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
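The ERROR entry above (wal.AbstractFSWAL(2118)) names the configuration key "hbase.wal.async.wait.on.shutdown.seconds" as the way to lengthen the wait on the async WAL writer close. Below is a minimal, hypothetical Java sketch of raising that value: the property name is taken from the log message itself, while the 30-second value and the use of HBaseConfiguration here are illustrative assumptions, not the configuration actually used by this test run.

  // Hypothetical sketch: raise the async-WAL shutdown wait referenced in the ERROR above.
  // Assumes hbase-common and hadoop-common are on the classpath; the 30s value is arbitrary.
  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hbase.HBaseConfiguration;

  public class WalShutdownWaitExample {
    public static void main(String[] args) {
      Configuration conf = HBaseConfiguration.create();
      // The message above reports a 5 second wait; store a larger value under the key
      // quoted in the ERROR. Configuration.setInt stores the value as a string, so this
      // works regardless of whether HBase reads it back as an int or a long.
      conf.setInt("hbase.wal.async.wait.on.shutdown.seconds", 30);
      System.out.println(conf.get("hbase.wal.async.wait.on.shutdown.seconds")); // prints 30
    }
  }

The same key could equally be set in hbase-site.xml; either way it only has an effect if it reaches the Configuration instance the WAL actually consults at shutdown.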
2024-12-02T15:31:18,586 WARN [BP-1550669498-172.17.0.2-1733152767991 heartbeating to localhost/127.0.0.1:36329 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-02T15:31:18,586 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-02T15:31:18,586 WARN [BP-1550669498-172.17.0.2-1733152767991 heartbeating to localhost/127.0.0.1:36329 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1550669498-172.17.0.2-1733152767991 (Datanode Uuid fa65bf0c-5e9d-4428-808c-f8f114691ef2) service to localhost/127.0.0.1:36329 2024-12-02T15:31:18,588 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/44e720fa-d04f-d823-6564-55fab94a4e60/cluster_91cb4ebe-02bf-547b-91b0-e80db118e0cc/data/data5/current/BP-1550669498-172.17.0.2-1733152767991 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-02T15:31:18,588 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/44e720fa-d04f-d823-6564-55fab94a4e60/cluster_91cb4ebe-02bf-547b-91b0-e80db118e0cc/data/data6/current/BP-1550669498-172.17.0.2-1733152767991 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-02T15:31:18,588 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-02T15:31:18,591 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@76c02ba1{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-02T15:31:18,591 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@7e7a95e6{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-02T15:31:18,591 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-02T15:31:18,591 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6c3f2c62{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-02T15:31:18,592 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@621090f4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/44e720fa-d04f-d823-6564-55fab94a4e60/hadoop.log.dir/,STOPPED} 2024-12-02T15:31:18,593 WARN [BP-1550669498-172.17.0.2-1733152767991 heartbeating to localhost/127.0.0.1:36329 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-02T15:31:18,593 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-02T15:31:18,593 WARN [BP-1550669498-172.17.0.2-1733152767991 heartbeating to localhost/127.0.0.1:36329 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1550669498-172.17.0.2-1733152767991 (Datanode Uuid da21253d-2aeb-4a86-a040-28953b734c51) service to localhost/127.0.0.1:36329 2024-12-02T15:31:18,593 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-02T15:31:18,593 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/44e720fa-d04f-d823-6564-55fab94a4e60/cluster_91cb4ebe-02bf-547b-91b0-e80db118e0cc/data/data3/current/BP-1550669498-172.17.0.2-1733152767991 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-02T15:31:18,594 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/44e720fa-d04f-d823-6564-55fab94a4e60/cluster_91cb4ebe-02bf-547b-91b0-e80db118e0cc/data/data4/current/BP-1550669498-172.17.0.2-1733152767991 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-02T15:31:18,594 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-02T15:31:18,596 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3ae93e7a{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-02T15:31:18,596 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5703ed84{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-02T15:31:18,596 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-02T15:31:18,596 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1bf4a906{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-02T15:31:18,596 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2c897929{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/44e720fa-d04f-d823-6564-55fab94a4e60/hadoop.log.dir/,STOPPED} 2024-12-02T15:31:18,598 WARN [BP-1550669498-172.17.0.2-1733152767991 heartbeating to localhost/127.0.0.1:36329 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-02T15:31:18,598 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-02T15:31:18,598 WARN [BP-1550669498-172.17.0.2-1733152767991 heartbeating to localhost/127.0.0.1:36329 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1550669498-172.17.0.2-1733152767991 (Datanode Uuid 59152d84-b8ba-43fe-83c0-049166250efe) service to localhost/127.0.0.1:36329 2024-12-02T15:31:18,598 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-02T15:31:18,599 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/44e720fa-d04f-d823-6564-55fab94a4e60/cluster_91cb4ebe-02bf-547b-91b0-e80db118e0cc/data/data1/current/BP-1550669498-172.17.0.2-1733152767991 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-02T15:31:18,599 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/44e720fa-d04f-d823-6564-55fab94a4e60/cluster_91cb4ebe-02bf-547b-91b0-e80db118e0cc/data/data2/current/BP-1550669498-172.17.0.2-1733152767991 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-02T15:31:18,599 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-02T15:31:18,605 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@287bbdb7{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-02T15:31:18,606 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@33b00b71{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-02T15:31:18,606 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-02T15:31:18,606 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@15dced0e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-02T15:31:18,606 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@c3e3f70{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-mapreduce/target/test-data/44e720fa-d04f-d823-6564-55fab94a4e60/hadoop.log.dir/,STOPPED} 2024-12-02T15:31:18,626 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-12-02T15:31:18,818 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down